hexsha stringlengths 40..40 | size int64 1..1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3..239 | max_stars_repo_name stringlengths 5..130 | max_stars_repo_head_hexsha stringlengths 40..78 | max_stars_repo_licenses sequencelengths 1..10 | max_stars_count int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24..24 ⌀ | max_issues_repo_path stringlengths 3..239 | max_issues_repo_name stringlengths 5..130 | max_issues_repo_head_hexsha stringlengths 40..78 | max_issues_repo_licenses sequencelengths 1..10 | max_issues_count int64 1..67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24..24 ⌀ | max_forks_repo_path stringlengths 3..239 | max_forks_repo_name stringlengths 5..130 | max_forks_repo_head_hexsha stringlengths 40..78 | max_forks_repo_licenses sequencelengths 1..10 | max_forks_count int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24..24 ⌀ | content stringlengths 1..1.03M | avg_line_length float64 1..958k | max_line_length int64 1..1.03M | alphanum_fraction float64 0..1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a26bc021925afcec77a9c60b07998b318cfc32d | 2,948 | py | Python | dev/Tools/lmbr_aws/cli.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Tools/lmbr_aws/cli.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | null | null | null | dev/Tools/lmbr_aws/cli.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
from __future__ import print_function
import argparse
import os
import sys
import bootstrap
def main():
try:
framework_directory_path = get_framework_directory_path()
cli = bootstrap.load_resource_manager_module(framework_directory_path, 'resource_manager.cli')
return cli.main()
except RuntimeError as e:
print('\nERROR: ' + e.message);
def get_framework_directory_path():
'''Determines the CloudGemFramework directory to use based on select command line arguments and the game project's gems.json file.'''
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--root-directory', default=os.getcwd(), help='Lumberyard install directory and location of bootstrap.cfg file. Default is the current working directory.')
parser.add_argument('--game-directory', help='Location of the game project directory. The default is {root-directory}\{game} where {game} is determined by the sys_game_folder setting in the {root-directory}\bootstrap.cfg file.')
parser.add_argument('--framework-directory', help='Specify the CloudGemFramework version used to execute the command. The default is determined by examining the gems.json file for the project identified by the bootstrap.cfg file.')
args, unknown_args = parser.parse_known_args()
if args.framework_directory:
i = sys.argv.index('--framework-directory')
sys.argv[i:i+2] = [] # remove arg because cli in resource manager doesn't know about it.
return args.framework_directory
else:
if args.game_directory:
game_directory_path = os.path.abspath(args.game_directory)
if not os.path.isdir(game_directory_path):
raise RuntimeError('The specified game directory does not exist: {}.'.format(game_directory_path))
else:
game_directory_path = bootstrap.get_game_directory_path(args.root_directory)
framework_directory_path = bootstrap.get_framework_directory_path(args.root_directory, game_directory_path)
if framework_directory_path is None:
raise RuntimeError('The gems.json file at {} does not contain an entry for the CloudGemFramework gem. You must enable this gem for your project.'.format(game_directory_path))
return framework_directory_path
if __name__ == "__main__":
sys.exit(main())
| 45.353846 | 235 | 0.72863 |
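
The `cli.py` record above resolves which CloudGemFramework copy should run by pre-parsing a few bootstrap flags with `argparse.ArgumentParser(add_help=False)` and `parse_known_args()`, then stripping `--framework-directory` out of `sys.argv` so the downstream resource-manager CLI never sees it. Below is a minimal standalone sketch of that pre-parse-and-strip pattern; the function name is invented and, like the original, it only handles the space-separated `--framework-directory <path>` form.

```python
# Sketch of the pre-parse pattern from the cli.py record above (assumes the
# flag is passed as "--framework-directory <path>", as the original does).
import argparse
import sys


def resolve_framework_dir(argv):
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--framework-directory')
    args, _unknown = parser.parse_known_args(argv[1:])
    if args.framework_directory and '--framework-directory' in argv:
        i = argv.index('--framework-directory')
        argv[i:i + 2] = []  # hide the flag from the downstream parser
    return args.framework_directory


if __name__ == '__main__':
    framework_dir = resolve_framework_dir(sys.argv)
    print('framework directory:', framework_dir)
    print('argv left for the real CLI:', sys.argv[1:])
```
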
4a26beeccdc06ef480105e99edcb3feed89192df | 4,655 | py | Python | sdk/python/pulumi_aws/codestarconnections/get_connection.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/codestarconnections/get_connection.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/codestarconnections/get_connection.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | ["ECL-2.0", "Apache-2.0"] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetConnectionResult',
'AwaitableGetConnectionResult',
'get_connection',
]
@pulumi.output_type
class GetConnectionResult:
"""
A collection of values returned by getConnection.
"""
def __init__(__self__, arn=None, connection_status=None, id=None, name=None, provider_type=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if connection_status and not isinstance(connection_status, str):
raise TypeError("Expected argument 'connection_status' to be a str")
pulumi.set(__self__, "connection_status", connection_status)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provider_type and not isinstance(provider_type, str):
raise TypeError("Expected argument 'provider_type' to be a str")
pulumi.set(__self__, "provider_type", provider_type)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> str:
"""
The CodeStar Connection status. Possible values are `PENDING`, `AVAILABLE` and `ERROR`.
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the CodeStar Connection. The name is unique in the calling AWS account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="providerType")
def provider_type(self) -> str:
"""
The name of the external provider where your third-party code repository is configured. Possible values are `Bitbucket`, `GitHub`, or `GitHubEnterpriseServer`.
"""
return pulumi.get(self, "provider_type")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Map of key-value resource tags to associate with the resource.
"""
return pulumi.get(self, "tags")
class AwaitableGetConnectionResult(GetConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionResult(
arn=self.arn,
connection_status=self.connection_status,
id=self.id,
name=self.name,
provider_type=self.provider_type,
tags=self.tags)
def get_connection(arn: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
"""
Provides details about CodeStar Connection.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.codestarconnections.get_connection(arn=aws_codestarconnections_connection["example"]["arn"])
```
:param str arn: The CodeStar Connection ARN.
:param Mapping[str, str] tags: Map of key-value resource tags to associate with the resource.
"""
__args__ = dict()
__args__['arn'] = arn
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:codestarconnections/getConnection:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
return AwaitableGetConnectionResult(
arn=__ret__.arn,
connection_status=__ret__.connection_status,
id=__ret__.id,
name=__ret__.name,
provider_type=__ret__.provider_type,
tags=__ret__.tags)
| 33.978102 | 167 | 0.64812 |
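
The `get_connection.py` record above is generated by the Pulumi Terraform bridge: an `@pulumi.output_type` result class plus a module-level `get_connection()` wrapper around `pulumi.runtime.invoke`. A minimal consumption sketch is shown below; the ARN is a placeholder and the snippet assumes it runs inside a Pulumi program with AWS credentials configured.

```python
# Usage sketch for the generated data source above; the ARN is a placeholder.
import pulumi
import pulumi_aws as aws

conn = aws.codestarconnections.get_connection(
    arn="arn:aws:codestar-connections:us-east-1:111111111111:connection/example-id")

# The attributes mirror the GetConnectionResult properties in the record.
pulumi.export("connection_name", conn.name)
pulumi.export("connection_status", conn.connection_status)
pulumi.export("provider_type", conn.provider_type)
```
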
4a26c0a268f444c831bd9f5f8d1897f593fe7994 | 1,914 | py | Python | contrib/devtools/check-doc.py | SicNull/tiercoin | f5f52da02c1d23ffb571123582c6049359a9d4d0 | ["MIT"] | null | null | null | contrib/devtools/check-doc.py | SicNull/tiercoin | f5f52da02c1d23ffb571123582c6049359a9d4d0 | ["MIT"] | null | null | null | contrib/devtools/check-doc.py | SicNull/tiercoin | f5f52da02c1d23ffb571123582c6049359a9d4d0 | ["MIT"] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizetiercoinamount'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
exit(len(args_need_doc))
if __name__ == "__main__":
main()
| 42.533333 | 303 | 0.6907 |
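
`check-doc.py` above shells out to `git`/`egrep` to collect every option the sources read and every option that appears in a `HelpMessageOpt` call, after which the whole check reduces to set differences. The toy sketch below reproduces just that final comparison with hard-coded option names (the names are made up stand-ins for the grep output):

```python
# Toy version of the set comparison at the heart of check-doc.py above;
# the option lists stand in for the grep + regex extraction output.
args_used = {"-datadir", "-debug", "-rpcport", "-secretflag"}
args_docd = {"-datadir", "-debug", "-rpcport", "-help"}

args_need_doc = args_used - args_docd   # read in code but never documented
args_unknown = args_docd - args_used    # documented but never read

print("Args undocumented:", sorted(args_need_doc))
print("Args unknown     :", sorted(args_unknown))
```
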
4a26c1776b8bc89ea333624f090f331d1d755c7c | 2,210 | py | Python | migration/rack/commits/commit643839e7d8036731ba1da767942c8e74c2876e2e.py | tuxji/RACK | 74b59b9a89b48cf2da91d7d9ac23ab3408e32bcf | ["BSD-3-Clause"] | 4 | 2021-07-02T08:58:05.000Z | 2022-02-02T03:02:32.000Z | migration/rack/commits/commit643839e7d8036731ba1da767942c8e74c2876e2e.py | tuxji/RACK | 74b59b9a89b48cf2da91d7d9ac23ab3408e32bcf | ["BSD-3-Clause"] | 309 | 2020-11-02T19:46:14.000Z | 2022-03-24T21:35:28.000Z | migration/rack/commits/commit643839e7d8036731ba1da767942c8e74c2876e2e.py | tuxji/RACK | 74b59b9a89b48cf2da91d7d9ac23ab3408e32bcf | ["BSD-3-Clause"] | 7 | 2020-11-30T22:22:06.000Z | 2022-02-02T03:09:12.000Z | # Copyright (c) 2021, Galois, Inc.
#
# All Rights Reserved
#
# This material is based upon work supported by the Defense Advanced Research
# Projects Agency (DARPA) under Contract No. FA8750-20-C-0203.
#
# Any opinions, findings and conclusions or recommendations expressed in this
# material are those of the author(s) and do not necessarily reflect the views
# of the Defense Advanced Research Projects Agency (DARPA).
from migration_helpers.name_space import rack
from ontology_changes import (
AtMost,
ChangeCardinality,
ChangePropertyIsATypeOf,
ChangePropertyRange,
Commit,
RenameProperty,
SingleValue,
)
FILE = rack("FILE")
PROV_S = rack("PROV-S")
SOFTWARE = rack("SOFTWARE")
commit = Commit(
number="643839e7d8036731ba1da767942c8e74c2876e2e",
changes=[
# FILE.sadl
ChangeCardinality(
name_space=FILE,
class_id="FILE",
property_id="filename",
to_cardinality=SingleValue(),
),
RenameProperty(
from_name_space=FILE,
from_class="FILE",
from_name="fileParent",
to_name_space=FILE,
to_class="FILE",
to_name="definedIn",
),
ChangePropertyIsATypeOf(
name_space=FILE,
class_id="FILE",
property_id="satisfies",
from_name_space=PROV_S,
from_property_id="wasDerivedFrom",
to_name_space=PROV_S,
to_property_id="wasImpactedBy",
),
ChangeCardinality(
name_space=FILE,
class_id="FILE",
property_id="createBy",
to_cardinality=AtMost(1),
),
# FILE.sadl / SOFTWARE.sadl
RenameProperty(
from_name_space=SOFTWARE,
from_class="FILE",
from_name="definedIn",
to_name_space=FILE,
to_class="FILE",
to_name="definedIn",
),
ChangePropertyRange(
prop_name_space=FILE,
prop_name="definedIn",
from_name_space=PROV_S,
from_range="ENTITY",
to_name_space=FILE,
to_range="FILE",
),
],
)
| 27.974684 | 78 | 0.593213 |
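
The RACK record above encodes an ontology migration as data: a `Commit` whose `changes` list is built from operation objects such as `RenameProperty` and `ChangeCardinality`. For illustration, a hypothetical follow-up commit written only with constructors and keyword arguments that already appear in the record might look like the sketch below; the commit hash, namespace, class, and property names are invented.

```python
# Hypothetical migration commit, reusing only constructors and keyword
# arguments visible in the record above. Hash and names are invented.
from migration_helpers.name_space import rack
from ontology_changes import (
    ChangeCardinality,
    Commit,
    RenameProperty,
    SingleValue,
)

HAZARD = rack("HAZARD")

commit = Commit(
    number="0000000000000000000000000000000000000000",
    changes=[
        RenameProperty(
            from_name_space=HAZARD,
            from_class="HAZARD",
            from_name="identified",
            to_name_space=HAZARD,
            to_class="HAZARD",
            to_name="identifiedBy",
        ),
        ChangeCardinality(
            name_space=HAZARD,
            class_id="HAZARD",
            property_id="identifiedBy",
            to_cardinality=SingleValue(),
        ),
    ],
)
```
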
4a26c230f7a21cc6dd4a3cdb52e32730b1ce73ca | 12,731 | py | Python | v1_api_demo/gan/gan_trainer.py | shenchaohua/Paddle | 9c5942db13308d53cc115708058c1e885f4b57a3 | ["Apache-2.0"] | 5 | 2017-06-20T06:13:01.000Z | 2021-04-21T03:54:08.000Z | v1_api_demo/gan/gan_trainer.py | shenchaohua/Paddle | 9c5942db13308d53cc115708058c1e885f4b57a3 | ["Apache-2.0"] | 9 | 2017-09-13T07:39:31.000Z | 2017-10-18T05:58:23.000Z | v1_api_demo/gan/gan_trainer.py | shenchaohua/Paddle | 9c5942db13308d53cc115708058c1e885f4b57a3 | ["Apache-2.0"] | 2 | 2017-07-16T03:19:48.000Z | 2019-01-16T05:33:20.000Z | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
import numpy
import cPickle
import sys, os
from PIL import Image
from paddle.trainer.config_parser import parse_config
from paddle.trainer.config_parser import logger
import py_paddle.swig_paddle as api
import matplotlib.pyplot as plt
def plot2DScatter(data, outputfile):
'''
Plot the data as a 2D scatter plot and save to outputfile
data needs to be two dimensinoal
'''
x = data[:, 0]
y = data[:, 1]
logger.info("The mean vector is %s" % numpy.mean(data, 0))
logger.info("The std vector is %s" % numpy.std(data, 0))
heatmap, xedges, yedges = numpy.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.scatter(x, y)
plt.savefig(outputfile, bbox_inches='tight')
def CHECK_EQ(a, b):
assert a == b, "a=%s, b=%s" % (a, b)
def copy_shared_parameters(src, dst):
'''
copy the parameters from src to dst
:param src: the source of the parameters
:type src: GradientMachine
:param dst: the destination of the parameters
:type dst: GradientMachine
'''
src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())]
src_params = dict([(p.getName(), p) for p in src_params])
for i in xrange(dst.getParameterSize()):
dst_param = dst.getParameter(i)
src_param = src_params.get(dst_param.getName(), None)
if src_param is None:
continue
src_value = src_param.getBuf(api.PARAMETER_VALUE)
dst_value = dst_param.getBuf(api.PARAMETER_VALUE)
CHECK_EQ(len(src_value), len(dst_value))
dst_value.copyFrom(src_value)
dst_param.setValueUpdated()
def print_parameters(src):
src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())]
print "***************"
for p in src_params:
print "Name is %s" % p.getName()
print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray(
)
def load_mnist_data(imageFile):
f = open(imageFile, "rb")
f.read(16)
# Define number of samples for train/test
if "train" in imageFile:
n = 60000
else:
n = 10000
data = numpy.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28))
data = data / 255.0 * 2.0 - 1.0
f.close()
return data.astype('float32')
def load_cifar_data(cifar_path):
batch_size = 10000
data = numpy.zeros((5 * batch_size, 32 * 32 * 3), dtype="float32")
for i in range(1, 6):
file = cifar_path + "/data_batch_" + str(i)
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
data[(i - 1) * batch_size:(i * batch_size), :] = dict["data"]
data = data / 255.0 * 2.0 - 1.0
return data
# synthesize 2-D uniform data
def load_uniform_data():
data = numpy.random.rand(1000000, 2).astype('float32')
return data
def merge(images, size):
if images.shape[1] == 28 * 28:
h, w, c = 28, 28, 1
else:
h, w, c = 32, 32, 3
img = numpy.zeros((h * size[0], w * size[1], c))
for idx in xrange(size[0] * size[1]):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = \
((images[idx, :].reshape((h, w, c), order="F").transpose(1, 0, 2) + 1.0) / 2.0 * 255.0)
return img.astype('uint8')
def save_images(images, path):
merged_img = merge(images, [8, 8])
if merged_img.shape[2] == 1:
im = Image.fromarray(numpy.squeeze(merged_img)).convert('RGB')
else:
im = Image.fromarray(merged_img, mode="RGB")
im.save(path)
def get_real_samples(batch_size, data_np):
return data_np[numpy.random.choice(
data_np.shape[0], batch_size, replace=False), :]
def get_noise(batch_size, noise_dim):
return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32')
def get_fake_samples(generator_machine, batch_size, noise):
gen_inputs = api.Arguments.createArguments(1)
gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise))
gen_outputs = api.Arguments.createArguments(0)
generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST)
fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat()
return fake_samples
def get_training_loss(training_machine, inputs):
outputs = api.Arguments.createArguments(0)
training_machine.forward(inputs, outputs, api.PASS_TEST)
loss = outputs.getSlotValue(0).copyToNumpyMat()
return numpy.mean(loss)
def prepare_discriminator_data_batch_pos(batch_size, data_np):
real_samples = get_real_samples(batch_size, data_np)
labels = numpy.ones(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(real_samples))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels))
return inputs
def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
fake_samples = get_fake_samples(generator_machine, batch_size, noise)
labels = numpy.zeros(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(fake_samples))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels))
return inputs
def prepare_generator_data_batch(batch_size, noise):
label = numpy.ones(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(label))
return inputs
def find(iterable, cond):
for item in iterable:
if cond(item):
return item
return None
def get_layer_size(model_conf, layer_name):
layer_conf = find(model_conf.layers, lambda x: x.name == layer_name)
assert layer_conf is not None, "Cannot find '%s' layer" % layer_name
return layer_conf.size
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data_source", help="mnist or cifar or uniform")
parser.add_argument(
"--use_gpu", default="1", help="1 means use gpu for training")
parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter")
args = parser.parse_args()
data_source = args.data_source
use_gpu = args.use_gpu
assert data_source in ["mnist", "cifar", "uniform"]
assert use_gpu in ["0", "1"]
if not os.path.exists("./%s_samples/" % data_source):
os.makedirs("./%s_samples/" % data_source)
if not os.path.exists("./%s_params/" % data_source):
os.makedirs("./%s_params/" % data_source)
api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10',
'--log_period=100', '--gpu_id=' + args.gpu_id,
'--save_dir=' + "./%s_params/" % data_source)
if data_source == "uniform":
conf = "gan_conf.py"
num_iter = 10000
else:
conf = "gan_conf_image.py"
num_iter = 1000
gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source)
dis_conf = parse_config(conf,
"mode=discriminator_training,data=" + data_source)
generator_conf = parse_config(conf, "mode=generator,data=" + data_source)
batch_size = dis_conf.opt_config.batch_size
noise_dim = get_layer_size(gen_conf.model_config, "noise")
if data_source == "mnist":
data_np = load_mnist_data("./data/mnist_data/train-images-idx3-ubyte")
elif data_source == "cifar":
data_np = load_cifar_data("./data/cifar-10-batches-py/")
else:
data_np = load_uniform_data()
# this creates a gradient machine for discriminator
dis_training_machine = api.GradientMachine.createFromConfigProto(
dis_conf.model_config)
# this create a gradient machine for generator
gen_training_machine = api.GradientMachine.createFromConfigProto(
gen_conf.model_config)
# generator_machine is used to generate data only, which is used for
# training discriminator
logger.info(str(generator_conf.model_config))
generator_machine = api.GradientMachine.createFromConfigProto(
generator_conf.model_config)
dis_trainer = api.Trainer.create(dis_conf, dis_training_machine)
gen_trainer = api.Trainer.create(gen_conf, gen_training_machine)
dis_trainer.startTrain()
gen_trainer.startTrain()
# Sync parameters between networks (GradientMachine) at the beginning
copy_shared_parameters(gen_training_machine, dis_training_machine)
copy_shared_parameters(gen_training_machine, generator_machine)
# constrain that either discriminator or generator can not be trained
# consecutively more than MAX_strike times
curr_train = "dis"
curr_strike = 0
MAX_strike = 5
for train_pass in xrange(100):
dis_trainer.startTrainPass()
gen_trainer.startTrainPass()
for i in xrange(num_iter):
# Do forward pass in discriminator to get the dis_loss
noise = get_noise(batch_size, noise_dim)
data_batch_dis_pos = prepare_discriminator_data_batch_pos(
batch_size, data_np)
dis_loss_pos = get_training_loss(dis_training_machine,
data_batch_dis_pos)
data_batch_dis_neg = prepare_discriminator_data_batch_neg(
generator_machine, batch_size, noise)
dis_loss_neg = get_training_loss(dis_training_machine,
data_batch_dis_neg)
dis_loss = (dis_loss_pos + dis_loss_neg) / 2.0
# Do forward pass in generator to get the gen_loss
data_batch_gen = prepare_generator_data_batch(batch_size, noise)
gen_loss = get_training_loss(gen_training_machine, data_batch_gen)
if i % 100 == 0:
print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos,
dis_loss_neg)
print "d_loss is %s g_loss is %s" % (dis_loss, gen_loss)
# Decide which network to train based on the training history
# And the relative size of the loss
if (not (curr_train == "dis" and curr_strike == MAX_strike)) and \
((curr_train == "gen" and curr_strike == MAX_strike) or dis_loss > gen_loss):
if curr_train == "dis":
curr_strike += 1
else:
curr_train = "dis"
curr_strike = 1
dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_neg)
dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos)
copy_shared_parameters(dis_training_machine,
gen_training_machine)
else:
if curr_train == "gen":
curr_strike += 1
else:
curr_train = "gen"
curr_strike = 1
gen_trainer.trainOneDataBatch(batch_size, data_batch_gen)
# TODO: add API for paddle to allow true parameter sharing between different GradientMachines
# so that we do not need to copy shared parameters.
copy_shared_parameters(gen_training_machine,
dis_training_machine)
copy_shared_parameters(gen_training_machine, generator_machine)
dis_trainer.finishTrainPass()
gen_trainer.finishTrainPass()
# At the end of each pass, save the generated samples/images
fake_samples = get_fake_samples(generator_machine, batch_size, noise)
if data_source == "uniform":
plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" %
(data_source, train_pass))
else:
save_images(fake_samples, "./%s_samples/train_pass%s.png" %
(data_source, train_pass))
dis_trainer.finishTrain()
gen_trainer.finishTrain()
if __name__ == '__main__':
main()
| 36.374286 | 110 | 0.650774 |
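
A detail worth pulling out of the `gan_trainer.py` record above is its scheduling rule: each iteration trains whichever network currently has the larger loss, but never the same network more than `MAX_strike` times in a row. The standalone sketch below isolates just that rule; the function name and the sample losses are invented for illustration.

```python
# Standalone version of the alternation rule in the training loop above:
# train whichever network is currently doing worse, capped at MAX_strike
# consecutive updates of the same network.
MAX_strike = 5


def next_network(curr_train, curr_strike, dis_loss, gen_loss):
    train_dis = (not (curr_train == "dis" and curr_strike == MAX_strike)) and \
        ((curr_train == "gen" and curr_strike == MAX_strike) or dis_loss > gen_loss)
    chosen = "dis" if train_dis else "gen"
    curr_strike = curr_strike + 1 if chosen == curr_train else 1
    return chosen, curr_strike


curr_train, curr_strike = "dis", 0
for dis_loss, gen_loss in [(0.9, 0.3), (0.8, 0.4), (0.2, 0.7)]:
    curr_train, curr_strike = next_network(curr_train, curr_strike,
                                           dis_loss, gen_loss)
    print(curr_train, curr_strike)
```
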
4a26c2c4a2f686ef8a4a8e0511fec3385309d1e4 | 37,034 | py | Python | google/ads/google_ads/v2/services/shared_set_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/services/shared_set_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | null | null | null | google/ads/google_ads/v2/services/shared_set_service_client.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | ["Apache-2.0"] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v2.services SharedSetService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.ads.google_ads.v2.services import enums
from google.ads.google_ads.v2.services import shared_set_service_client_config
from google.ads.google_ads.v2.services.transports import shared_set_service_grpc_transport
from google.ads.google_ads.v2.proto.resources import account_budget_pb2
from google.ads.google_ads.v2.proto.resources import account_budget_proposal_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_feed_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_label_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_pb2
from google.ads.google_ads.v2.proto.resources import ad_group_simulation_pb2
from google.ads.google_ads.v2.proto.resources import ad_parameter_pb2
from google.ads.google_ads.v2.proto.resources import ad_pb2
from google.ads.google_ads.v2.proto.resources import ad_schedule_view_pb2
from google.ads.google_ads.v2.proto.resources import age_range_view_pb2
from google.ads.google_ads.v2.proto.resources import asset_pb2
from google.ads.google_ads.v2.proto.resources import bidding_strategy_pb2
from google.ads.google_ads.v2.proto.resources import billing_setup_pb2
from google.ads.google_ads.v2.proto.resources import campaign_audience_view_pb2
from google.ads.google_ads.v2.proto.resources import campaign_bid_modifier_pb2
from google.ads.google_ads.v2.proto.resources import campaign_budget_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_pb2
from google.ads.google_ads.v2.proto.resources import campaign_criterion_simulation_pb2
from google.ads.google_ads.v2.proto.resources import campaign_draft_pb2
from google.ads.google_ads.v2.proto.resources import campaign_experiment_pb2
from google.ads.google_ads.v2.proto.resources import campaign_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import campaign_feed_pb2
from google.ads.google_ads.v2.proto.resources import campaign_label_pb2
from google.ads.google_ads.v2.proto.resources import campaign_pb2
from google.ads.google_ads.v2.proto.resources import campaign_shared_set_pb2
from google.ads.google_ads.v2.proto.resources import carrier_constant_pb2
from google.ads.google_ads.v2.proto.resources import change_status_pb2
from google.ads.google_ads.v2.proto.resources import click_view_pb2
from google.ads.google_ads.v2.proto.resources import conversion_action_pb2
from google.ads.google_ads.v2.proto.resources import custom_interest_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_client_pb2
from google.ads.google_ads.v2.proto.resources import customer_extension_setting_pb2
from google.ads.google_ads.v2.proto.resources import customer_feed_pb2
from google.ads.google_ads.v2.proto.resources import customer_label_pb2
from google.ads.google_ads.v2.proto.resources import customer_manager_link_pb2
from google.ads.google_ads.v2.proto.resources import customer_negative_criterion_pb2
from google.ads.google_ads.v2.proto.resources import customer_pb2
from google.ads.google_ads.v2.proto.resources import detail_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import display_keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import distance_view_pb2
from google.ads.google_ads.v2.proto.resources import domain_category_pb2
from google.ads.google_ads.v2.proto.resources import dynamic_search_ads_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import expanded_landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import extension_feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_pb2
from google.ads.google_ads.v2.proto.resources import feed_item_target_pb2
from google.ads.google_ads.v2.proto.resources import feed_mapping_pb2
from google.ads.google_ads.v2.proto.resources import feed_pb2
from google.ads.google_ads.v2.proto.resources import feed_placeholder_view_pb2
from google.ads.google_ads.v2.proto.resources import gender_view_pb2
from google.ads.google_ads.v2.proto.resources import geo_target_constant_pb2
from google.ads.google_ads.v2.proto.resources import geographic_view_pb2
from google.ads.google_ads.v2.proto.resources import google_ads_field_pb2
from google.ads.google_ads.v2.proto.resources import group_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_group_view_pb2
from google.ads.google_ads.v2.proto.resources import hotel_performance_view_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_ad_group_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_campaign_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_negative_keyword_pb2
from google.ads.google_ads.v2.proto.resources import keyword_plan_pb2
from google.ads.google_ads.v2.proto.resources import keyword_view_pb2
from google.ads.google_ads.v2.proto.resources import label_pb2
from google.ads.google_ads.v2.proto.resources import landing_page_view_pb2
from google.ads.google_ads.v2.proto.resources import language_constant_pb2
from google.ads.google_ads.v2.proto.resources import location_view_pb2
from google.ads.google_ads.v2.proto.resources import managed_placement_view_pb2
from google.ads.google_ads.v2.proto.resources import media_file_pb2
from google.ads.google_ads.v2.proto.resources import merchant_center_link_pb2
from google.ads.google_ads.v2.proto.resources import mobile_app_category_constant_pb2
from google.ads.google_ads.v2.proto.resources import mobile_device_constant_pb2
from google.ads.google_ads.v2.proto.resources import mutate_job_pb2
from google.ads.google_ads.v2.proto.resources import operating_system_version_constant_pb2
from google.ads.google_ads.v2.proto.resources import paid_organic_search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import parental_status_view_pb2
from google.ads.google_ads.v2.proto.resources import product_bidding_category_constant_pb2
from google.ads.google_ads.v2.proto.resources import product_group_view_pb2
from google.ads.google_ads.v2.proto.resources import recommendation_pb2
from google.ads.google_ads.v2.proto.resources import remarketing_action_pb2
from google.ads.google_ads.v2.proto.resources import search_term_view_pb2
from google.ads.google_ads.v2.proto.resources import shared_criterion_pb2
from google.ads.google_ads.v2.proto.resources import shared_set_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_proposal_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2
from google.ads.google_ads.v2.proto.services import account_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import ad_group_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2
from google.ads.google_ads.v2.proto.services import ad_parameter_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2
from google.ads.google_ads.v2.proto.services import ad_schedule_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import ad_service_pb2
from google.ads.google_ads.v2.proto.services import ad_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import asset_service_pb2
from google.ads.google_ads.v2.proto.services import asset_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2
from google.ads.google_ads.v2.proto.services import bidding_strategy_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2
from google.ads.google_ads.v2.proto.services import billing_setup_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_audience_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_bid_modifier_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_criterion_simulation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_draft_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_experiment_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import campaign_shared_set_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2
from google.ads.google_ads.v2.proto.services import carrier_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import change_status_service_pb2
from google.ads.google_ads.v2.proto.services import change_status_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import click_view_service_pb2
from google.ads.google_ads.v2.proto.services import click_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_action_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_adjustment_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2
from google.ads.google_ads.v2.proto.services import conversion_upload_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2
from google.ads.google_ads.v2.proto.services import custom_interest_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2
from google.ads.google_ads.v2.proto.services import customer_client_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2
from google.ads.google_ads.v2.proto.services import customer_extension_setting_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2
from google.ads.google_ads.v2.proto.services import customer_feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2
from google.ads.google_ads.v2.proto.services import customer_label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2
from google.ads.google_ads.v2.proto.services import customer_manager_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import customer_negative_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import customer_service_pb2
from google.ads.google_ads.v2.proto.services import customer_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import detail_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import display_keyword_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2
from google.ads.google_ads.v2.proto.services import distance_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2
from google.ads.google_ads.v2.proto.services import domain_category_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import dynamic_search_ads_search_term_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import expanded_landing_page_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import extension_feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import extension_feed_item_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_item_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_mapping_service_pb2
from google.ads.google_ads.v2.proto.services import feed_mapping_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_placeholder_view_service_pb2
from google.ads.google_ads.v2.proto.services import feed_placeholder_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import feed_service_pb2
from google.ads.google_ads.v2.proto.services import feed_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import gender_view_service_pb2
from google.ads.google_ads.v2.proto.services import gender_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import geo_target_constant_service_pb2
from google.ads.google_ads.v2.proto.services import geo_target_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import geographic_view_service_pb2
from google.ads.google_ads.v2.proto.services import geographic_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import google_ads_field_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_field_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import google_ads_service_pb2
from google.ads.google_ads.v2.proto.services import google_ads_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import group_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import group_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import hotel_group_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_group_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import hotel_performance_view_service_pb2
from google.ads.google_ads.v2.proto.services import hotel_performance_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import invoice_service_pb2
from google.ads.google_ads.v2.proto.services import invoice_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_ad_group_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_ad_group_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_campaign_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_campaign_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_idea_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_idea_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_keyword_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_negative_keyword_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_negative_keyword_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_plan_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2
from google.ads.google_ads.v2.proto.services import keyword_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import label_service_pb2
from google.ads.google_ads.v2.proto.services import label_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2
from google.ads.google_ads.v2.proto.services import landing_page_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import language_constant_service_pb2
from google.ads.google_ads.v2.proto.services import language_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import location_view_service_pb2
from google.ads.google_ads.v2.proto.services import location_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import managed_placement_view_service_pb2
from google.ads.google_ads.v2.proto.services import managed_placement_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import media_file_service_pb2
from google.ads.google_ads.v2.proto.services import media_file_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import merchant_center_link_service_pb2
from google.ads.google_ads.v2.proto.services import merchant_center_link_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import mobile_app_category_constant_service_pb2
from google.ads.google_ads.v2.proto.services import mobile_app_category_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import mobile_device_constant_service_pb2
from google.ads.google_ads.v2.proto.services import mobile_device_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import mutate_job_service_pb2
from google.ads.google_ads.v2.proto.services import mutate_job_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import operating_system_version_constant_service_pb2
from google.ads.google_ads.v2.proto.services import operating_system_version_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import paid_organic_search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import paid_organic_search_term_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import parental_status_view_service_pb2
from google.ads.google_ads.v2.proto.services import parental_status_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import payments_account_service_pb2
from google.ads.google_ads.v2.proto.services import payments_account_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import product_bidding_category_constant_service_pb2
from google.ads.google_ads.v2.proto.services import product_bidding_category_constant_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import product_group_view_service_pb2
from google.ads.google_ads.v2.proto.services import product_group_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import reach_plan_service_pb2
from google.ads.google_ads.v2.proto.services import reach_plan_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import recommendation_service_pb2
from google.ads.google_ads.v2.proto.services import recommendation_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import remarketing_action_service_pb2
from google.ads.google_ads.v2.proto.services import remarketing_action_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import search_term_view_service_pb2
from google.ads.google_ads.v2.proto.services import search_term_view_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import shared_criterion_service_pb2
from google.ads.google_ads.v2.proto.services import shared_criterion_service_pb2_grpc
from google.ads.google_ads.v2.proto.services import shared_set_service_pb2
from google.ads.google_ads.v2.proto.services import shared_set_service_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import wrappers_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class SharedSetServiceClient(object):
"""Service to manage shared sets."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v2.services.SharedSetService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SharedSetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def shared_set_path(cls, customer, shared_set):
"""Return a fully-qualified shared_set string."""
return google.api_core.path_template.expand(
'customers/{customer}/sharedSets/{shared_set}',
customer=customer,
shared_set=shared_set,
)
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.SharedSetServiceGrpcTransport,
Callable[[~.Credentials, type], ~.SharedSetServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = shared_set_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=shared_set_service_grpc_transport.SharedSetServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = shared_set_service_grpc_transport.SharedSetServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_shared_set(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested shared set in full detail.
Args:
resource_name (str): The resource name of the shared set to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.SharedSet` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_shared_set' not in self._inner_api_calls:
self._inner_api_calls['get_shared_set'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_shared_set,
default_retry=self._method_configs['GetSharedSet'].retry,
default_timeout=self._method_configs['GetSharedSet'].timeout,
client_info=self._client_info,
)
request = shared_set_service_pb2.GetSharedSetRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_shared_set'](request, retry=retry, timeout=timeout, metadata=metadata)
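    # Illustrative call (for reference only; the resource name format shown here
    # is an assumption, not taken from this module):
    #
    #   shared_set = googleads_client.get_shared_set(
    #       'customers/{customer_id}/sharedSets/{shared_set_id}')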
def mutate_shared_sets(
self,
customer_id,
operations,
partial_failure=None,
validate_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates, updates, or removes shared sets. Operation statuses are returned.
Args:
customer_id (str): The ID of the customer whose shared sets are being modified.
operations (list[Union[dict, ~google.ads.googleads_v2.types.SharedSetOperation]]): The list of operations to perform on individual shared sets.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.ads.googleads_v2.types.SharedSetOperation`
partial_failure (bool): If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will be carried
out in one transaction if and only if they are all valid.
Default is false.
validate_only (bool): If true, the request is validated but not executed. Only errors are
returned, not results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.MutateSharedSetsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'mutate_shared_sets' not in self._inner_api_calls:
self._inner_api_calls['mutate_shared_sets'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_shared_sets,
default_retry=self._method_configs['MutateSharedSets'].retry,
default_timeout=self._method_configs['MutateSharedSets'].timeout,
client_info=self._client_info,
)
request = shared_set_service_pb2.MutateSharedSetsRequest(
customer_id=customer_id,
operations=operations,
partial_failure=partial_failure,
validate_only=validate_only,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('customer_id', customer_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['mutate_shared_sets'](request, retry=retry, timeout=timeout, metadata=metadata)
| 62.557432 | 155 | 0.802398 |
4a26c2d979cb66afd8b6c37b60af9c3d88eb7ab2 | 1,672 | py | Python | srcs/testset.py | huy-ha/PIXOR | 9ad2b451470aa30c7dd40be23b526f6fbedb6bc1 | [
"MIT"
] | 259 | 2019-02-19T07:38:55.000Z | 2022-03-28T05:58:42.000Z | srcs/testset.py | huy-ha/PIXOR | 9ad2b451470aa30c7dd40be23b526f6fbedb6bc1 | [
"MIT"
] | 30 | 2019-02-18T08:43:27.000Z | 2021-02-05T15:51:36.000Z | srcs/testset.py | huy-ha/PIXOR | 9ad2b451470aa30c7dd40be23b526f6fbedb6bc1 | [
"MIT"
] | 59 | 2018-10-28T17:42:03.000Z | 2022-02-15T11:59:32.000Z | import glob
import pandas as pd
import numpy as np
import os.path
from PIL import Image
class TestSet(object):
def __init__(self, basedir, bagname):
self.basedir = basedir
self.bagname = bagname
self.data_path = os.path.join(basedir, bagname)
self.imtype = 'png'
self._get_file_lists()
def _get_file_lists(self):
"""Find and list data files for each sensor."""
self.cam_files = sorted(glob.glob(
os.path.join(self.data_path, 'img', '*.{}'.format(self.imtype))))
self.velo_files = sorted(glob.glob(
os.path.join(self.data_path, 'pc_data', '*.bin')))
self.cam_stamps = pd.read_csv(os.path.join(self.data_path, 'img', 'imgtimestamps.csv'))
self.velo_stamps = pd.read_csv(os.path.join(self.data_path, 'velo', 'velotimestamps.csv'))
def get_cam2(self, idx):
"""Load an image from file."""
mode = "RGB"
return Image.open(self.cam_files[idx]).convert('RGB')
def get_velo(self, idx):
"""Read velodyne [x,y,z,reflectance] scan at the specified index."""
scan = np.fromfile(self.velo_files[idx], dtype=np.float32)
return scan.reshape((-1, 4))
def test():
basedir = '/mnt/ssd2/od/testset'
date = '_2018-10-30-15-25-07'
dataset = TestSet(basedir, date)
print(dataset.velo_files)
im = dataset.get_cam2(0)
print(im.width)
print(im.height)
scan = dataset.get_velo(0)
np.set_printoptions(precision=3)
print(scan.mean(axis=0))
print(scan.max(axis=0))
print(scan.min(axis=0))
if __name__=="__main__":
test()
| 31.54717 | 98 | 0.610048 |
4a26c309499eeb68c48f7e886174b0e5cd38e061 | 28,031 | py | Python | remofile/cli.py | Sonkun/remofile | 918adf1bfb3f21356848834457ec043afe53b6f1 | [
"MIT"
] | 1 | 2018-10-17T14:22:05.000Z | 2018-10-17T14:22:05.000Z | remofile/cli.py | intjelic/remofile | 918adf1bfb3f21356848834457ec043afe53b6f1 | [
"MIT"
] | null | null | null | remofile/cli.py | intjelic/remofile | 918adf1bfb3f21356848834457ec043afe53b6f1 | [
"MIT"
] | 1 | 2019-01-09T19:46:45.000Z | 2019-01-09T19:46:45.000Z | # Remofile - Quick and easy-to-use alternative to FTP
#
# This file is distributed under the MIT License. See the LICENSE file
# in the root of this project for more information.
#
# Written by Jonathan De Wachter <[email protected]>, March 2018
import os
import sys
from datetime import datetime
from pathlib import PurePosixPath, PosixPath
import click
from remofile.server import Server
from remofile.server import FILE_SIZE_LIMIT, MINIMUM_CHUNK_SIZE, MAXIMUM_CHUNK_SIZE
from remofile.client import Client
from remofile.exceptions import *
from remofile.token import generate_token
from remofile.keys import generate_keys
from remofile.daemon import Daemon
DEFAULT_TIMEOUT_VALUE = 3600
MISCONFIGURED_ENVIRONMENT_MESSAGE = """The environment must be \
configured with the following variables in order for Remofile to \
locate and connect to the server.
REMOFILE_HOSTNAME - The ip address to use (can be 'localhost' or a domain name)
REMOFILE_PORT - The port to use (optional, use 6768 by default)
REMOFILE_TOKEN - The token to use during the authentication process
Configure your environment and try again.
"""
TIMEOUT_ERROR_MESSAGE = """Timeout error; couldn't access the server \
within the expected time.
Check if the server is accessible or try to increase the timeout value \
for slow connection.
"""
GENERATED_TOKEN_MESSAGE = """The generated token is {0}, keep it \
secret because it acts as a password!
A token was generated because no token was specified in command-line \
parameters.
"""
INVALID_ROOT_DIRECTORY_MESSAGE = """The root directory isn't valid; \
either it doesn't exist or the server doesn't have the permissions to \
read it.
The root directory is the directory that is served across the network, \
therefore it must exist and be accessible.
"""
INCORRECT_CONFIG_VALUE_MESSAGE = """One of the following values is \
incorrect.
* file size limit
* minimum chunk size
* maximum chunk size
Read the documentation to understand their possible value.
"""
LIST_ALL_FLAG_DESCRIPTION = "Display additional file information."
LIST_RECURSIVE_FLAG_DESCRIPTION = "List directories and their contents recursively."
FILE_UPDATE_FLAG_DESCRIPTION = "Ignore (and don't fail) if files already exist."
FOLDER_UPDATE_FLAG_DESCRIPTION = "Ignore (and don't fail) if directories already exist."
UPLOAD_RECURSIVE_FLAG_DESCRIPTION = "Upload directories and their content recursively."
DOWNLOAD_RECURSIVE_FLAG_DESCRIPTION = "Download directories and their content recursively."
PROGRESS_FLAG_DESCRIPTION = "Display a progress indicator."
TIMEOUT_FLAG_DESCRIPTION = "Adjust the timeout value in milliseconds."
FILE_SIZE_LIMIT_FLAG_DESCRIPTION = "Foobar"
MIN_CHUNK_SIZE_FLAG_DESCRIPTION = "Foobar"
MAX_CHUNK_SIZE_FLAG_DESCRIPTION = "Foobar"
PIDFILE_FLAG_DESCRIPTION = "Foobar"
def get_info_from_environment():
hostname = os.environ.get("REMOFILE_HOSTNAME")
port = os.environ.get("REMOFILE_PORT", 6768)
token = os.environ.get("REMOFILE_TOKEN")
if hostname == 'localhost':
hostname = '127.0.0.1'
return hostname, port, token
def create_client():
# read environment variable for server information and print
# misconfigured environment error if some variables are missing
hostname, port, token = get_info_from_environment()
if not hostname or not port or not token:
print(MISCONFIGURED_ENVIRONMENT_MESSAGE)
exit(1)
return Client(hostname, port, token)
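# Illustrative shell configuration (not part of the original module): the
# client-related commands read the variables documented in
# MISCONFIGURED_ENVIRONMENT_MESSAGE, e.g.
#
#   export REMOFILE_HOSTNAME=localhost
#   export REMOFILE_PORT=6768
#   export REMOFILE_TOKEN=<token produced by `rmf generate-token`>
#
# The hostname, port and token values above are placeholders.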
def adjust_timeout(timeout):
# if not specified, adjust timeout to the default global timeout
if not timeout:
timeout = DEFAULT_TIMEOUT_VALUE
return timeout
def display_generated_token(token):
print(GENERATED_TOKEN_MESSAGE.format(token))
@click.group()
def cli():
pass
@cli.command('list')
@click.argument('directory', default='/')
@click.option('--all', '-a', 'with_metadata', is_flag=True, help=LIST_ALL_FLAG_DESCRIPTION)
@click.option('--recursive', '-r', is_flag=True, help=LIST_RECURSIVE_FLAG_DESCRIPTION)
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def list_files(directory, with_metadata, recursive, timeout):
""" List files in the remote directory.
This is a client-related command that lists files of a given
directory located in the remote directory. This command is akin to
the POSIX **ls** command found in Unix-like OSes.
It takes only one **optional** parameter which is the remote
directory to list files for, and must be an absolute path of an
**existing** directory. By default, it lists the root directory.
By default, it only displays file names and doesn't list the
    directory recursively. If the **-a** flag is set, it also lists the
file metadata (file or directory indicator, file size and last
modification time), and if the **-r** flag is set, the
sub-directories are listed as well.
Additionally, the **--timeout** flag allows you to adjust the number
of milliseconds to wait before giving up on the server response.
"""
client = create_client()
timeout = adjust_timeout(timeout)
def display_directory_files(root, directory, with_metadata, recursive):
try:
files = client.list_files(os.path.join(root, directory), timeout)
except ValueError:
print("Unable to list files for '{0}' directory; it must be an absolute path.".format(directory))
exit(1)
except NotADirectoryError:
print("Cannot access '{0}' directory; no such directory exists.".format(directory))
exit(1)
except TimeoutError:
print(TIMEOUT_ERROR_MESSAGE)
exit(1)
subdirectories = []
if not with_metadata:
for name, (is_directory, _, _) in files.items():
print(os.path.join(directory, name))
if is_directory and recursive:
subdirectories.append(os.path.join(directory, name))
else:
# it requires double pass to compute columns width
file_size_column_width = 0
file_time_column_width = 0
file_lines = []
for name, (is_directory, file_size, file_time) in files.items():
file_time = datetime.fromtimestamp(file_time)
file_time_string = file_time.strftime('%Y-%m-%d %H:%M:%S')
if not is_directory:
file_lines.append((os.path.join(directory, name), '[F]', str(file_size), file_time_string))
else:
file_lines.append((os.path.join(directory, name), '[D]', str(file_size), file_time_string))
file_size_column_width = max(file_size_column_width, len(str(file_size)))
file_time_column_width = max(file_time_column_width, len(file_time_string))
if is_directory and recursive:
subdirectories.append(os.path.join(directory, name))
# add padding to column width
file_size_column_width += 2
for name, file_type, file_size, file_time in file_lines:
print('{0} {1} {2} {3}'.format(file_type, file_size.ljust(file_size_column_width), file_time.ljust(file_time_column_width), name))
for subdirectory in subdirectories:
display_directory_files(root, subdirectory, with_metadata, recursive)
display_directory_files(directory, '', with_metadata, recursive)
    del client # debug code, for some reason the socket won't be disconnected
@cli.command('file')
@click.argument('name')
@click.argument('directory', default='/')
@click.option('--update', '-u', is_flag=True, help=FILE_UPDATE_FLAG_DESCRIPTION)
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def create_file(name, directory, update, timeout):
""" Create a file in the remote directory.
This is a client-related command that creates an empty file in the
a given directory located in the remote directory. This command is
akin to the POSIX **touch** command found in Unix-like OSes.
It takes the name of the file and an optional remote directory (in
which to create the file) in parameters. The directory parameter
must be an absolute path of an **existing** directory. By default,
it creates the file in the root directory.
If the file already exists in the given directory, the command fails
unless the **--update** flag is set. Note that unlike the `touch`
command, it doesn't update the file timestamp.
Additionally, the **--timeout** flag allows you to adjust the number
of milliseconds to wait before giving up on the server response.
"""
client = create_client()
timeout = adjust_timeout(timeout)
try:
client.create_file(name, directory, timeout)
except ValueError:
print("Unable to create file in '{0}' directory; it must be an absolute path.".format(directory))
exit(1)
except FileNameError:
print("Unable to create file with name '{0}'; it must be a valid file name.".format(name))
exit(1)
except NotADirectoryError:
print("Cannot access '{0}' directory; no such directory exists.".format(directory))
exit(1)
except FileExistsError:
if not update:
print("Unable to create file with name '{0}'; it's conflicting with an existing file.".format(name))
exit(1)
except TimeoutError:
print(TIMEOUT_ERROR_MESSAGE)
exit(1)
if directory == '/':
print("File '{0}' successfuly created in root directory.".format(name))
else:
print("File '{0}' successfuly created in '{1}' directory.".format(name, directory))
@cli.command('folder')
@click.argument('name')
@click.argument('directory', default='/')
@click.option('--update', '-u', is_flag=True, help=FOLDER_UPDATE_FLAG_DESCRIPTION)
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def make_directory(name, directory, update, timeout):
""" Create a folder in the remote directory.
    This is a client-related command that creates an empty folder in a
    given directory located in the remote directory. This command is
akin to the POSIX **mkdir** command found in Unix-like OSes.
It takes the name of the folder and an optional remote directory (in
which to create the folder) in parameters. The directory parameter
must be an absolute path of an **existing** directory. By default,
it creates the folder in the root directory.
If the folder already exists in the given directory, the command
fails unless the **--update** flag is set. Note that it leaves the
existing directory unchanged.
Additionally, the **--timeout** flag allows you to adjust the number
of milliseconds to wait before giving up on the server response.
"""
client = create_client()
timeout = adjust_timeout(timeout)
try:
client.make_directory(name, directory, timeout)
except ValueError:
print("Unable to create folder in '{0}' directory; it must be an absolute path.".format(directory))
exit(1)
except FileNameError:
print("Unable to create folder with name '{0}'; it must be a valid file name.".format(name))
exit(1)
except NotADirectoryError:
print("Cannot access '{0}' directory; no such directory exists.".format(directory))
exit(1)
except FileExistsError:
if not update:
print("Unable to create folder with name '{0}'; it's conflicting with an existing file.".format(name))
exit(1)
except TimeoutError:
print(TIMEOUT_ERROR_MESSAGE)
exit(1)
if directory == '/':
print("Folder '{0}' successfuly created in root directory.".format(name))
else:
print("Folder '{0}' successfuly created in '{1}' directory.".format(name, directory))
#@click.option('--update', '-u')
##-u, --update copy only when the SOURCE file is newer than the destination file or when the destination file is missing
#@click.option('--resume')
#@click.option('--min-size', help="don't transfer any file smaller than SIZE")
#@click.option('--max-size', help="don't transfer any file larger than SIZE")
#--list-only list the files instead of copying them
#--exclude=PATTERN exclude files matching PATTERN
#--exclude-from=FILE read exclude patterns from FILE
#--include=PATTERN don't exclude files matching PATTERN
#--include-from=FILE read include patterns from FILE
#@click.option('--out-format', help="output updates using the specified FORMAT")
#@click.option('--log-file', help="log what we're doing to the specified FILE")
#@click.option('--log-file-format', help="log updates using the specified FMT")
#@click.option('--chunk-size', default=512, type=click.INT)
@cli.command('upload')
@click.argument('source', nargs=-1)
@click.argument('destination', nargs=1)
@click.option('--recursive', '-r', is_flag=True, help=UPLOAD_RECURSIVE_FLAG_DESCRIPTION)
@click.option('--progress', '-p', is_flag=True, help=PROGRESS_FLAG_DESCRIPTION)
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def upload_files(source, destination, recursive, progress, timeout):
""" Upload files to the remote directory.
This is a client-related command that uploads files to the remote
directory. The source must be files or directories located on the
local filesystem and the destination must be an **existing**
directory located in the remote directory. Unlike the source, the
destination must be an absolute path.
If source refers to one or more directories, the recursive flag must
be set otherwise they'll be skipped. The progress flag allows to
display the progression of the transfer which is useful for large
files.
Examples. ::
rmf upload -r -p src/my-file.txt src/my-directory/ /dst
Additionally, the **--timeout** flag allows you to adjust the number
of milliseconds to wait before giving up on the server response.
"""
client = create_client()
timeout = adjust_timeout(timeout)
# ensure we work with pure posix paths
source = (PosixPath(path) for path in source)
destination = PosixPath(destination)
def display_progress(chunk_data, remaining_bytes, file_size, file_name):
chunk_size = 512
progress = (file_size - (remaining_bytes - len(chunk_data))) / file_size * 100
sys.stdout.write("\r{0:0.2f}% | {1}".format(progress, file_name))
sys.stdout.flush()
if remaining_bytes <= chunk_size:
sys.stdout.write('\n')
return True
def upload_file(path):
try:
if progress:
client.upload_file(path, destination, None, 512, display_progress, None)
else:
client.upload_file(path, destination, timeout=timeout)
except ValueError:
print("Unable to upload files to '{0}'; destination must be an absolute path.".format(destination))
exit(1)
except SourceNotFound:
print("Unable to upload file '{0}'; no such file exists.".format(path))
exit(1)
except DestinationNotFound:
print("Unable to upload files to '{0}'; no such directory exists.".format(destination))
exit(1)
except FileExistsError:
print("Unable to upload file '{0}'; it's conflicting with an existing file.".format(path))
exit(1)
except FileNameError:
raise NotImplementedError
except TimeoutError:
print(TIMEOUT_ERROR_MESSAGE)
exit(1)
def upload_directory(path):
try:
if progress:
client.upload_directory(path, destination, None, 512, display_progress, None)
else:
client.upload_directory(path, destination, timeout=timeout)
except ValueError:
print("Unable to upload files to '{0}'; destination must be an absolute path.".format(destination))
exit(1)
except SourceNotFound:
print("Unable to upload folder '{0}'; no such directory exists.".format(path))
exit(1)
except DestinationNotFound:
print("Unable to upload files to '{0}'; no such directory exists.".format(destination))
exit(1)
except FileExistsError:
print("Unable to upload folder '{0}'; it's conflicting with an existing file.".format(path))
exit(1)
except FileNameError:
raise NotImplementedError
except TimeoutError:
print(TIMEOUT_ERROR_MESSAGE)
exit(1)
for path in source:
if not path.exists():
print("Unable to upload file '{0}'; no such file or directory exists.".format(path))
exit(1)
if path.is_file():
upload_file(path)
elif path.is_dir():
if recursive:
upload_directory(path)
else:
print("Skip uploading folder '{0}'; the recursive flag must be set.".format(path))
else:
raise NotImplementedError("Uploading symbolic links isn't supported yet.")
    del client # debug code, for some reason the socket won't be disconnected
@cli.command('download')
@click.argument('source', nargs=-1)
@click.argument('destination', nargs=1)
@click.option('--recursive', '-r', is_flag=True, help=DOWNLOAD_RECURSIVE_FLAG_DESCRIPTION)
@click.option('--progress', '-p', is_flag=True, help=PROGRESS_FLAG_DESCRIPTION)
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def download_files(source, destination, recursive, progress, timeout):
""" Download files from the remote directory.
This is a client-related command that downloads files from the
remote directory. The source must be files or directories located on
the remote directory and the destination must be an **existing**
directory located on the local filesystem. Unlike the destination,
the source must be absolute paths.
If source refers to one or more directories, the recursive flag must
be set otherwise they'll be skipped. The progress flag allows to
display the progression of the transfer which is useful for large
files.
Examples. ::
rmf download -r -p /src/my-file.txt /src/my-directory/ dst/
Additionally, the **--timeout** flag allows you to adjust the number
of milliseconds to wait before giving up on the server response.
"""
client = create_client()
timeout = adjust_timeout(timeout)
# ensure we work with pure posix paths
source = (PurePosixPath(path) for path in source)
destination = PosixPath(destination)
def display_progress(chunk_data, remaining_bytes, file_size, file_name):
chunk_size = 512
progress = (file_size - (remaining_bytes - len(chunk_data))) / file_size * 100
sys.stdout.write("\r{0:0.2f}% | {1}".format(progress, file_name))
sys.stdout.flush()
if remaining_bytes <= chunk_size:
sys.stdout.write('\n')
return True
def download_file(path):
try:
if progress:
client.download_file(path, destination, None, 512, display_progress, None)
else:
client.download_file(path, destination, timeout=timeout)
except Exception as error:
print(error)
exit(1)
#except ValueError:
#print("Unable to upload files to '{0}'; destination must be an absolute path.".format(destination))
#exit(1)
#except SourceNotFound:
#print("Unable to upload file '{0}'; no such file exists.".format(path))
#exit(1)
#except DestinationNotFound:
#print("Unable to upload files to '{0}'; no such directory exists.".format(destination))
#exit(1)
#except FileExistsError:
#print("Unable to upload file '{0}'; it's conflicting with an existing file.".format(path))
#exit(1)
#except FileNameError:
#raise NotImplementedError
#except TimeoutError:
#print(TIMEOUT_ERROR_MESSAGE)
#exit(1)
def download_directory(path):
try:
if progress:
client.download_directory(path, destination, None, 512, display_progress, None)
else:
client.download_directory(path, destination, timeout=timeout)
except Exception as error:
print(error)
exit(1)
#except ValueError:
#print("Unable to upload files to '{0}'; destination must be an absolute path.".format(destination))
#exit(1)
#except SourceNotFound:
#print("Unable to upload folder '{0}'; no such directory exists.".format(path))
#exit(1)
#except DestinationNotFound:
#print("Unable to upload files to '{0}'; no such directory exists.".format(destination))
#exit(1)
#except FileExistsError:
#print("Unable to upload folder '{0}'; it's conflicting with an existing file.".format(path))
#exit(1)
#except FileNameError:
#raise NotImplementedError
#except TimeoutError:
#print(TIMEOUT_ERROR_MESSAGE)
#exit(1)
for path in source:
is_directory = True
        # compare the string form; a PurePosixPath never compares equal to a plain str
        if str(path) != '/':
files = client.list_files(path.parent)
if path.name not in files:
print("Unable to download file '{0}'; no such file or directory exists.".format(path))
exit(1)
is_directory = files[path.name][0]
if not is_directory:
download_file(path)
else:
if recursive:
download_directory(path)
else:
print("Skip downloading folder '{0}'; the recursive flag must be set.".format(path))
del client # debug code, for some reason the socket won't be disconnected
@cli.command('remove')
@click.argument('name')
@click.argument('directory', default='/')
@click.option('--timeout', '-t', type=click.INT, help=TIMEOUT_FLAG_DESCRIPTION)
def remove_files(name, directory, timeout):
""" Remove files in the remote directory.
This is a client-related command that removes a file or a folder
located in the remote directory. This command is akin to the POSIX
**rm** command found in Unix-like OSes.
    It takes the name of the file or folder to remove and an optional
    remote directory in parameters. The directory parameter must be an
    absolute path. Note that this command is not implemented yet.
"""
client = create_client()
timeout = adjust_timeout(timeout)
pass
@cli.command('run')
@click.argument('directory')
@click.argument('port', default=6768)
@click.argument('token', required=False)
@click.option('--file-size-limit', default=FILE_SIZE_LIMIT, type=click.INT, help=FILE_SIZE_LIMIT_FLAG_DESCRIPTION)
@click.option('--min-chunk-size', default=MINIMUM_CHUNK_SIZE, type=click.INT, help=MIN_CHUNK_SIZE_FLAG_DESCRIPTION)
@click.option('--max-chunk-size', default=MAXIMUM_CHUNK_SIZE, type=click.INT, help=MAX_CHUNK_SIZE_FLAG_DESCRIPTION)
def run_server(directory, port, token, file_size_limit, min_chunk_size, max_chunk_size):
""" Start a (non-daemonized) server.
This is a server-related command that start a non-daemonized server
(not detached from the shell). The directory parameter is the root
directory which will be served and therefore must be an existing
directory. The server listens on port 6768 by default but it can be
changed with the port parameter. If the token is not specified, it's
generated and printed out to the console before the server starts
running.
Additionally, the file size limit and the chunk size range can be
altered. The file size limit and minimum chunk size must be both be
greater than 0, and maximum chunk size must be greater or equal to
minimum chunk size.
"""
    if not token:
        # use the token helper directly; the module-level name `generate_token`
        # is shadowed by the `generate-token` CLI command defined below
        from remofile.token import generate_token as make_token
        token = make_token()
        display_generated_token(token)
try:
server = Server(directory, token,
file_size_limit=file_size_limit,
chunk_size_range=(min_chunk_size, max_chunk_size))
except NotADirectoryError:
print(INVALID_ROOT_DIRECTORY_MESSAGE)
exit(1)
except ValueError:
print(INCORRECT_CONFIG_VALUE_MESSAGE)
exit(1)
server.run(port)
@cli.command('start')
@click.argument('directory')
@click.argument('port', default=6768)
@click.argument('token', required=False)
@click.option('--pidfile', default=os.path.join(os.getcwd(), 'daemon.pid'), help=PIDFILE_FLAG_DESCRIPTION)
@click.option('--file-size-limit', default=FILE_SIZE_LIMIT, type=click.INT, help=FILE_SIZE_LIMIT_FLAG_DESCRIPTION)
@click.option('--min-chunk-size', default=MINIMUM_CHUNK_SIZE, type=click.INT, help=MIN_CHUNK_SIZE_FLAG_DESCRIPTION)
@click.option('--max-chunk-size', default=MAXIMUM_CHUNK_SIZE, type=click.INT, help=MAX_CHUNK_SIZE_FLAG_DESCRIPTION)
def start_server(directory, port, token, pidfile, file_size_limit, min_chunk_size, max_chunk_size):
""" Start a daemonized server.
    This is a server-related command that starts a daemonized server
(detached from the shell). Unlike the run command, it accepts the
--pidfile flag which tells the pidfile location. By default, the
pidfile is created in the current working directory and named
'daemon.pid'.
Refer to the run command for more information.
"""
    if not token:
        # use the token helper directly; the module-level name `generate_token`
        # is shadowed by the `generate-token` CLI command defined below
        from remofile.token import generate_token as make_token
        token = make_token()
        display_generated_token(token)
try:
server = Server(directory, token, file_size_limit=file_size_limit, chunk_size_range=(min_chunk_size, max_chunk_size))
except NotADirectoryError:
print(INVALID_ROOT_DIRECTORY_MESSAGE)
exit(1)
except ValueError:
print(INCORRECT_CONFIG_VALUE_MESSAGE)
exit(1)
def loop():
server.run(port)
daemon = Daemon(loop, pidfile)
daemon.start()
@cli.command('stop')
@click.option('--pidfile', default=os.path.join(os.getcwd(), 'daemon.pid'), help=PIDFILE_FLAG_DESCRIPTION)
def stop_server(pidfile):
""" Stop a daemonized server.
    This is a server-related command that stops a daemonized server from
its pidfile. By default, it expects the pidfile in the current
working directory with the name 'daemon.pid' but it can be altered
with the --pidfile flag.
"""
Daemon.stop(pidfile)
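# Illustrative invocations (assuming the CLI entry point is installed as `rmf`,
# as in the upload/download docstring examples; the paths are placeholders):
#
#   rmf run /srv/shared 6768
#   rmf start /srv/shared 6768 --pidfile /tmp/remofile.pid
#   rmf stop --pidfile /tmp/remofile.pid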
@cli.command('generate-token')
def generate_token():
""" Generate a token.
    This is a utility command that generates a valid token needed to
configure both the client and the server.
Note that by default, the server will generate a token if none was
explicitly set.
"""
from remofile.token import generate_token
print(generate_token())
@cli.command('generate-keys')
def generate_keys():
""" Generate a pair of keys.
    This is a utility command that generates a valid pair of keys to
encrypt communication with clients.
The first key is a public key that must be shared across clients
connecting to the Remofile server and the second key is the private
key that must be kept secret. Both :py:class:`Client` and
:py:class:`Server` instances must be configured with their
respective keys.
"""
from remofile.keys import generate_keys
public_key, private_key = generate_keys()
print("public key: {0}".format(str(public_key, 'utf-8')))
print("private key: {0}".format(str(private_key, 'utf-8')))
cli.add_command(list_files)
cli.add_command(create_file)
cli.add_command(make_directory)
cli.add_command(upload_files)
cli.add_command(download_files)
cli.add_command(remove_files)
cli.add_command(run_server)
cli.add_command(start_server)
cli.add_command(stop_server)
cli.add_command(generate_token)
cli.add_command(generate_keys)
| 39.204196 | 146 | 0.680711 |
4a26c31eece135515456c19395f35833176ce67d | 1,004 | py | Python | userbot/modules/direct_message.py | vckyou/XBot-Remix | ec19357fe7de1ce5f8ad7a7c3cc5fc37553dd955 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/direct_message.py | vckyou/XBot-Remix | ec19357fe7de1ce5f8ad7a7c3cc5fc37553dd955 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/direct_message.py | vckyou/XBot-Remix | ec19357fe7de1ce5f8ad7a7c3cc5fc37553dd955 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | from userbot.events import register
from userbot import CMD_HELP
# Ported By @VckyouuBitch From GeezProject
# Devoted To GeezProject
@register(outgoing=True, pattern=r"^\.(?:dm)\s?(.*)?")
async def remoteaccess(event):
p = event.pattern_match.group(1)
m = p.split(" ")
chat_id = m[0]
try:
chat_id = int(chat_id)
except BaseException:
pass
msg = ""
mssg = await event.get_reply_message()
if event.reply_to_msg_id:
await event.client.send_message(chat_id, mssg)
await event.edit("`Success Mengirim Pesan Anda.`")
for i in m[1:]:
msg += i + " "
if msg == "":
return
try:
await event.client.send_message(chat_id, msg)
await event.edit("`Success Mengirim Pesan Anda.`")
except BaseException:
await event.edit("**Terjadi Error. Gagal Mengirim Pesan.**")
CMD_HELP.update(
{
"message": "`.dm`\
\nMengirim Pesan Dengan Jarak Jauh Dengan .dm <username> <pesan>."
})
| 24.487805 | 70 | 0.619522 |
4a26c3bb8c9fc0048af75e918c7aa4ce31ca0d37 | 3,452 | py | Python | examples/xgb_master.py | rmill040/ptuner | bf76ca75ad79a0def7965dd29f3e44056b1d8f09 | [
"MIT"
] | 1 | 2020-06-24T16:04:44.000Z | 2020-06-24T16:04:44.000Z | examples/xgb_master.py | rmill040/ptuner | bf76ca75ad79a0def7965dd29f3e44056b1d8f09 | [
"MIT"
] | null | null | null | examples/xgb_master.py | rmill040/ptuner | bf76ca75ad79a0def7965dd29f3e44056b1d8f09 | [
"MIT"
] | null | null | null | import numpy as np
import os
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
# Custom imports
from ptuner import ParallelPipelineTuner, STATUS_FAIL, STATUS_OK
from ptuner.spaces import NaiveFeatureSampler, SpaceSampler, XGBClassifierSampler
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
SEED = 1718
def make_data(N, p):
"""Generate toy data."""
X = np.random.normal(0, 1, (N, p))
y = 2 + .59*X[:, 0] + .39*X[:, 1] + .39*X[:, 2]
y = 1 / (1 + np.exp(-y))
y = np.random.binomial(1, y, N)
np.savetxt(os.path.join(DATA_DIR, 'toy_xgb.csv'), np.column_stack([X, y]), delimiter=',')
if __name__ == "__main__":
np.random.seed(SEED)
#############
# Make data #
#############
N, p = 500, 100
make_data(N, p)
# Load into memory and split into train/test
df = np.loadtxt(os.path.join(DATA_DIR, 'toy_xgb.csv'), delimiter=',')
X, y = df[:, :-1], df[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=.33, random_state=SEED
)
##################
# Define sampler #
##################
sampler = SpaceSampler()
sampler.add_feature_sampler(
name='features',
sampler=NaiveFeatureSampler(p=p)
)
sampler.add_hyperparameter_sampler(
name='xgboost',
sampler=XGBClassifierSampler(early_stopping=True)
)
#############################
# Define objective function #
#############################
def objective(params, current_round):
"""Objective function to be optimized"""
        # Extra params to be updated on different rounds
fit_params = {
'eval_set' : None,
'eval_metric' : 'auc',
'early_stopping_rounds' : None,
'verbose' : False
}
early_stopping_rounds = [10, 50, 100, 250, 500]
try:
# Unpack parameters
features = params['features']
hps = params['xgboost']
# Update fit params
fit_params['eval_set'] = [(X_test[:, features], y_test)]
fit_params['early_stopping_rounds'] = early_stopping_rounds[current_round]
# Train/evaluate model
clf = XGBClassifier(**hps).fit(
X_train[:, features],
y_train,
**fit_params
)
# Update n_estimators because of early stopping
params['xgboost']['n_estimators'] = clf.best_iteration+1
# Return information, at least containing metric
return {
'status' : STATUS_OK,
'message' : None,
'metric' : clf.best_score
}
except Exception as e:
return {
'status' : STATUS_FAIL,
'message' : e,
'metric' : 0.0
}
################
# Define tuner #
################
tuner = ParallelPipelineTuner(
db_host='localhost',
db_port=27017,
lower_is_better=False,
experiment_name='ptuner',
role='master',
n_jobs=-1,
backend='threading'
)
tuner.search(
objective=objective,
sampler=sampler,
max_configs_per_round=[500, 400, 300, 200, 100]
) | 27.616 | 93 | 0.519699 |
4a26c432d3ca46baaa9603e334480ab69d6a58df | 8,128 | py | Python | .waf.rej/.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Tools/tex.py | acstanton515/Tree-Based-Routing | 90742ec5908061e77b473fd2feb6733ad6592da9 | [
"MIT"
] | null | null | null | .waf.rej/.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Tools/tex.py | acstanton515/Tree-Based-Routing | 90742ec5908061e77b473fd2feb6733ad6592da9 | [
"MIT"
] | null | null | null | .waf.rej/.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Tools/tex.py | acstanton515/Tree-Based-Routing | 90742ec5908061e77b473fd2feb6733ad6592da9 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re
from waflib import Utils,Task,Errors
from waflib.TaskGen import feature,before_method
from waflib.Logs import error,warn,debug
re_bibunit=re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
node=self.inputs[0]
nodes=[]
if not node:return nodes
code=Utils.readf(node.abspath())
for match in re_bibunit.finditer(code):
path=match.group('file')
if path:
for k in['','.bib']:
debug('tex: trying %s%s'%(path,k))
fi=node.parent.find_resource(path+k)
if fi:
nodes.append(fi)
else:
debug('tex: could not find %s'%path)
debug("tex: found the following bibunit files: %s"%nodes)
return nodes
exts_deps_tex=['','.ltx','.tex','.bib','.pdf','.png','.eps','.ps']
exts_tex=['.ltx','.tex']
re_tex=re.compile(r'\\(?P<type>include|bibliography|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
g_bibtex_re=re.compile('bibdata',re.M)
class tex(Task.Task):
bibtex_fun,_=Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}',shell=False)
bibtex_fun.__doc__="""
Execute the program **bibtex**
"""
makeindex_fun,_=Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}',shell=False)
makeindex_fun.__doc__="""
Execute the program **makeindex**
"""
def scan_aux(self,node):
nodes=[node]
re_aux=re.compile(r'\\@input{(?P<file>[^{}]*)}',re.M)
def parse_node(node):
code=node.read()
for match in re_aux.finditer(code):
path=match.group('file')
found=node.parent.find_or_declare(path)
if found and found not in nodes:
debug('tex: found aux node '+found.abspath())
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
def scan(self):
node=self.inputs[0]
nodes=[]
names=[]
seen=[]
if not node:return(nodes,names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code=node.read()
global re_tex
for match in re_tex.finditer(code):
for path in match.group('file').split(','):
if path:
add_name=True
found=None
for k in exts_deps_tex:
debug('tex: trying %s%s'%(path,k))
found=node.parent.find_resource(path+k)
if found and not found in self.outputs:
nodes.append(found)
add_name=False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
debug("tex: found the following : %s and names %s"%(nodes,names))
return(nodes,names)
def check_status(self,msg,retcode):
if retcode!=0:
raise Errors.WafError("%r command exit status %r"%(msg,retcode))
def bibfile(self):
need_bibtex=False
try:
for aux_node in self.aux_nodes:
ct=aux_node.read()
if g_bibtex_re.findall(ct):
need_bibtex=True
break
except(OSError,IOError):
error('error bibtex scan')
else:
if need_bibtex:
warn('calling bibtex')
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS':self.TEXINPUTS,'BSTINPUTS':self.TEXINPUTS})
self.env.SRCFILE=self.aux_nodes[0].name[:-4]
self.check_status('error when calling bibtex',self.bibtex_fun())
def bibunits(self):
try:
bibunits=bibunitscan(self)
		except(OSError,IOError):
error('error bibunitscan')
else:
if bibunits:
fn=['bu'+str(i)for i in xrange(1,len(bibunits)+1)]
if fn:
warn('calling bibtex on bibunits')
for f in fn:
self.env.env={'BIBINPUTS':self.TEXINPUTS,'BSTINPUTS':self.TEXINPUTS}
self.env.SRCFILE=f
self.check_status('error when calling bibtex',self.bibtex_fun())
def makeindex(self):
try:
idx_path=self.idx_node.abspath()
os.stat(idx_path)
except OSError:
warn('index file %s absent, not calling makeindex'%idx_path)
else:
warn('calling makeindex')
self.env.SRCFILE=self.idx_node.name
self.env.env={}
self.check_status('error when calling makeindex %s'%idx_path,self.makeindex_fun())
def run(self):
env=self.env
if not env['PROMPT_LATEX']:
env.append_value('LATEXFLAGS','-interaction=batchmode')
env.append_value('PDFLATEXFLAGS','-interaction=batchmode')
env.append_value('XELATEXFLAGS','-interaction=batchmode')
fun=self.texfun
node=self.inputs[0]
srcfile=node.abspath()
texinputs=self.env.TEXINPUTS or''
self.TEXINPUTS=node.parent.get_bld().abspath()+os.pathsep+node.parent.get_src().abspath()+os.pathsep+texinputs+os.pathsep
self.aux_node=node.change_ext('.aux')
self.cwd=self.inputs[0].parent.get_bld().abspath()
warn('first pass on %s'%self.__class__.__name__)
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS':self.TEXINPUTS})
self.env.SRCFILE=srcfile
self.check_status('error when calling latex',fun())
self.aux_nodes=self.scan_aux(node.change_ext('.aux'))
self.idx_node=node.change_ext('.idx')
self.bibfile()
self.bibunits()
self.makeindex()
hash=''
for i in range(10):
prev_hash=hash
try:
hashes=[Utils.h_file(x.abspath())for x in self.aux_nodes]
hash=Utils.h_list(hashes)
except(OSError,IOError):
error('could not read aux.h')
pass
if hash and hash==prev_hash:
break
warn('calling %s'%self.__class__.__name__)
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS':self.TEXINPUTS})
self.env.SRCFILE=srcfile
self.check_status('error when calling %s'%self.__class__.__name__,fun())
class latex(tex):
texfun,vars=Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}',shell=False)
class pdflatex(tex):
texfun,vars=Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}',shell=False)
class xelatex(tex):
texfun,vars=Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}',shell=False)
class dvips(Task.Task):
run_str='${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
class dvipdf(Task.Task):
run_str='${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
class pdf2ps(Task.Task):
run_str='${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
def apply_tex(self):
if not getattr(self,'type',None)in['latex','pdflatex','xelatex']:
self.type='pdflatex'
tree=self.bld
outs=Utils.to_list(getattr(self,'outs',[]))
self.env['PROMPT_LATEX']=getattr(self,'prompt',1)
deps_lst=[]
if getattr(self,'deps',None):
deps=self.to_list(self.deps)
for filename in deps:
n=self.path.find_resource(filename)
if not n in deps_lst:deps_lst.append(n)
for node in self.to_nodes(self.source):
if self.type=='latex':
task=self.create_task('latex',node,node.change_ext('.dvi'))
elif self.type=='pdflatex':
task=self.create_task('pdflatex',node,node.change_ext('.pdf'))
elif self.type=='xelatex':
task=self.create_task('xelatex',node,node.change_ext('.pdf'))
task.env=self.env
if deps_lst:
try:
lst=tree.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
tree.node_deps[task.uid()]=deps_lst
if self.type=='latex':
if'ps'in outs:
tsk=self.create_task('dvips',task.outputs,node.change_ext('.ps'))
tsk.env.env={'TEXINPUTS':node.parent.abspath()+os.pathsep+self.path.abspath()+os.pathsep+self.path.get_bld().abspath()}
if'pdf'in outs:
tsk=self.create_task('dvipdf',task.outputs,node.change_ext('.pdf'))
tsk.env.env={'TEXINPUTS':node.parent.abspath()+os.pathsep+self.path.abspath()+os.pathsep+self.path.get_bld().abspath()}
elif self.type=='pdflatex':
if'ps'in outs:
self.create_task('pdf2ps',task.outputs,node.change_ext('.ps'))
self.source=[]
def configure(self):
v=self.env
for p in'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split():
try:
self.find_program(p,var=p.upper())
except self.errors.ConfigurationError:
pass
v['DVIPSFLAGS']='-Ppdf'
feature('tex')(apply_tex)
before_method('process_source')(apply_tex) | 33.586777 | 152 | 0.687131 |
4a26c4e4466394f0d221797f83ca00d6ca6ae12e | 2,608 | py | Python | talentmap_api/fsbid/tests/test_fsbid_bidlist.py | burgwyn/State-TalentMAP-API | 1f4f3659c5743ebfd558cd87af381f5460f284b3 | [
"CC0-1.0"
] | 5 | 2018-08-09T18:51:12.000Z | 2021-11-08T10:28:17.000Z | talentmap_api/fsbid/tests/test_fsbid_bidlist.py | burgwyn/State-TalentMAP-API | 1f4f3659c5743ebfd558cd87af381f5460f284b3 | [
"CC0-1.0"
] | 13 | 2019-01-11T18:19:33.000Z | 2021-03-22T17:15:17.000Z | talentmap_api/fsbid/tests/test_fsbid_bidlist.py | burgwyn/State-TalentMAP-API | 1f4f3659c5743ebfd558cd87af381f5460f284b3 | [
"CC0-1.0"
] | 4 | 2018-06-13T14:49:27.000Z | 2021-06-30T22:29:15.000Z | import pytest
import datetime
from dateutil.relativedelta import relativedelta
from model_mommy import mommy
from unittest.mock import Mock, patch
from rest_framework import status
from django.utils import timezone
import talentmap_api.fsbid.services as services
bid = {
"submittedDate": "2019/01/01",
"statusCode": "A",
"handshakeCode": "N",
"cycle": {
"description" : "",
"status": "A",
},
"employee": {
"perdet_seq_num" : "1"
},
"cyclePosition": {
"cp_id": 1,
"status": "A",
"pos_seq_num": "1",
"totalBidders": 0,
"atGradeBidders": 0,
"inConeBidders": 0,
"inBothBidders": 0,
}
}
@pytest.fixture
def test_bidder_fixture(authorized_user):
group = mommy.make('auth.Group', name='bidder')
group.user_set.add(authorized_user)
@pytest.mark.django_db(transaction=True)
@pytest.mark.usefixtures("test_bidder_fixture")
def test_bidlist_actions(authorized_client, authorized_user):
with patch('talentmap_api.fsbid.services.requests.get') as mock_get:
mock_get.return_value = Mock(ok=True)
mock_get.return_value.json.return_value = [bid]
response = authorized_client.get(f'/api/v1/fsbid/bidlist/')
assert response.json()[0]['emp_id'] == [bid][0]['employee']['perdet_seq_num']
@pytest.mark.django_db(transaction=True)
@pytest.mark.usefixtures("test_bidder_fixture")
def test_bidlist_position_actions(authorized_client, authorized_user):
with patch('talentmap_api.fsbid.services.requests.get') as mock_get:
# returns 404 when no position is found
mock_get.return_value = Mock(ok=True)
mock_get.return_value.json.return_value = []
response = authorized_client.get(f'/api/v1/fsbid/bidlist/position/1/')
assert response.status_code == status.HTTP_404_NOT_FOUND
# returns 204 when position is found
mock_get.return_value = Mock(ok=True)
mock_get.return_value.json.return_value = [bid]
response = authorized_client.get(f'/api/v1/fsbid/bidlist/position/1/')
assert response.status_code == status.HTTP_204_NO_CONTENT
with patch('talentmap_api.fsbid.services.requests.post') as mock_post:
mock_post.return_value = Mock(ok=True)
response = authorized_client.put(f'/api/v1/fsbid/bidlist/position/1/')
assert response.status_code == status.HTTP_204_NO_CONTENT
with patch('talentmap_api.fsbid.services.requests.delete') as mock_del:
mock_del.return_value = Mock(ok=True)
response = authorized_client.delete(f'/api/v1/fsbid/bidlist/position/1/')
assert response.status_code == status.HTTP_204_NO_CONTENT
| 36.732394 | 83 | 0.717408 |
4a26c586498b6c8bde6f47a2e0d479b19c7f03ad | 873 | py | Python | rsa/src/crypto/cipher_test.py | rafaelcn/cryptography | 38de104b4a264a9f71d40f6306bfe96e24e6985b | [
"MIT"
] | null | null | null | rsa/src/crypto/cipher_test.py | rafaelcn/cryptography | 38de104b4a264a9f71d40f6306bfe96e24e6985b | [
"MIT"
] | null | null | null | rsa/src/crypto/cipher_test.py | rafaelcn/cryptography | 38de104b4a264a9f71d40f6306bfe96e24e6985b | [
"MIT"
] | null | null | null | import unittest
from crypto import key
from crypto import primes
from crypto import cipher
# PYTHONPATH=. python3 src/cipher_test.py (on src)
# PYTHONPATH=../ python3 cipher_test.py (on crypto)
class TestKeyAlgorithms(unittest.TestCase):
def test_key_gen(self):
p = primes.get_random_bits(1024)
q = primes.get_random_bits(1024)
msg = 'Rafael e Rafael fazem um trabalho com cara de pastel'
public_key, private_key = key.generate(p, q)
self.assertEqual(public_key['n'], p * q)
self.assertEqual(private_key['n'], p * q)
encrypted_message, t, x, y = cipher.encrypt(msg, public_key)
decrypted_message = cipher.decrypt(encrypted_message, private_key,
t, x, y)
self.assertEqual(decrypted_message, msg)
if __name__ == "__main__":
unittest.main()
| 26.454545 | 74 | 0.654066 |
4a26c5d77ab0fee9d615d30d3a46fa6648183122 | 148 | py | Python | paperjam/codes.py | MineRobber9000/paperjam | f91d8d349f92268afc19414c5b5e27fe37488143 | [
"MIT"
] | null | null | null | paperjam/codes.py | MineRobber9000/paperjam | f91d8d349f92268afc19414c5b5e27fe37488143 | [
"MIT"
] | null | null | null | paperjam/codes.py | MineRobber9000/paperjam | f91d8d349f92268afc19414c5b5e27fe37488143 | [
"MIT"
] | null | null | null | from enum import Enum
class Codes(Enum):
HELLO_POSTING = 200
HELLO_NOPOST = 201
GOODBYE = 205
TEMP_UNAVAILABLE = 400
PERM_UNAVAILABLE = 502
| 14.8 | 23 | 0.75 |
4a26c60d9fe0a6b4323489911c9aa7674c735b8d | 306 | py | Python | setup.py | jpmfribeiro/PyCharts | e1b52ba11adbb8fa884116019e2321bc769adfba | [
"MIT"
] | null | null | null | setup.py | jpmfribeiro/PyCharts | e1b52ba11adbb8fa884116019e2321bc769adfba | [
"MIT"
] | null | null | null | setup.py | jpmfribeiro/PyCharts | e1b52ba11adbb8fa884116019e2321bc769adfba | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='PyCharts',
version='',
packages=['pycharts', 'pycharts.charts',
'pycharts.fields', 'pycharts.fields.series', 'pycharts.fields.plot_options'],
url='',
license='',
author='jpedro',
author_email='',
description=''
)
| 21.857143 | 91 | 0.611111 |
4a26c6f8f858cbaadcc9f82323fc7e8743a4dfd5 | 3,786 | py | Python | vlapy/core/field.py | kyleniemeyer/VlaPy | efbd38f0d53fb4a5ffa61ecbfbc14d3383eb0f48 | [
"MIT"
] | 22 | 2020-03-02T10:46:59.000Z | 2022-03-29T12:04:55.000Z | vlapy/core/field.py | kyleniemeyer/VlaPy | efbd38f0d53fb4a5ffa61ecbfbc14d3383eb0f48 | [
"MIT"
] | 68 | 2020-03-13T01:16:07.000Z | 2020-09-17T05:18:06.000Z | vlapy/core/field.py | kyleniemeyer/VlaPy | efbd38f0d53fb4a5ffa61ecbfbc14d3383eb0f48 | [
"MIT"
] | 10 | 2020-02-24T18:55:41.000Z | 2021-10-01T12:16:23.000Z | # MIT License
#
# Copyright (c) 2020 Archis Joglekar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from scipy import fft
def compute_charges(f, dv):
"""
Computes a simple moment of the distribution function along
the velocity axis using the trapezoidal rule
:param f: (2D float array) (nx, nv) the distribution function
:param dv: (float) velocity-axis spacing
:return:
"""
return np.trapz(f, dx=dv, axis=1)
def __fft_solve__(net_charge_density, one_over_kx):
"""
del^2 phi = -rho
del e = - integral[rho] = - integral[fdv]
:param net_charge_density: (1D float array (nx,)) charge-density
:param one_over_kx: (1D float array (nx,)) one over real-space wavenumber axis (numpy array of shape (nx,))
:return:
"""
return np.real(fft.ifft(1j * one_over_kx * fft.fft(net_charge_density)))
def solve_for_field(charge_density, one_over_kx):
"""
Solves for the net electric field after subtracting ion charge
:param charge_density: (1D float array (nx,)) charge-density
:param one_over_kx: (1D float array (nx,)) one over real-space wavenumber axis
:return:
"""
return __fft_solve__(
net_charge_density=1.0 - charge_density, one_over_kx=one_over_kx
)
def get_spectral_solver(dv, one_over_kx):
"""
This function gets the spectral field solver that uses Fourier transforms to solve the
periodic system
:param dv: (float) grid spacing in v
:param one_over_kx: (1D float array (nx,)) one over real-space wavenumber axis
:return: the function with the above arguments initialized as static variables
"""
def solve_total_electric_field(driver_field, f):
"""
Allows adding a driver field
:param driver_field: an electric field (numpy array of shape (nx,))
:param f: distribution function. (numpy array of shape (nx, nv))
:return: The solver function
"""
return driver_field + solve_for_field(
charge_density=compute_charges(f, dv), one_over_kx=one_over_kx
)
return solve_total_electric_field
def get_field_solver(stuff_for_time_loop, field_solver_implementation="spectral"):
"""
This method gets the field solver based on the choice in the input parameters.
:param stuff_for_time_loop: dictionary of parameters for the simulation
:param field_solver_implementation: (string) the name of the field solver chosen in the input parameters
:return:
"""
if field_solver_implementation == "spectral":
field_solver = get_spectral_solver(
dv=stuff_for_time_loop["dv"], one_over_kx=stuff_for_time_loop["one_over_kx"]
)
else:
raise NotImplementedError
return field_solver
| 35.055556 | 111 | 0.717644 |
4a26c704afa1a27eb9993c040d00f727b45f776e | 292 | py | Python | dim/nn/modules/mseLoss.py | xhqing/dim | ea776d05568641565358213ea402422102dc53d8 | [
"MIT"
] | null | null | null | dim/nn/modules/mseLoss.py | xhqing/dim | ea776d05568641565358213ea402422102dc53d8 | [
"MIT"
] | null | null | null | dim/nn/modules/mseLoss.py | xhqing/dim | ea776d05568641565358213ea402422102dc53d8 | [
"MIT"
] | null | null | null | import dim
from .module import Module
class MSELoss(Module):
    def __init__(self):
        super(MSELoss, self).__init__()
        self.moduleList.append({"name": None, "module": self})
    def forward(self, x, y):
        return dim.nn.functional.mseLoss(x, y)
    def __str__(self):
        return "MSELoss()"
| 22.461538 | 56 | 0.681507 |
4a26c8a5b240e964460cff33c7238a4d033c2d92 | 1,245 | py | Python | 201-250/209.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | 201-250/209.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | 201-250/209.py | yshshadow/Leetcode | 5097f69bb0050d963c784d6bc0e88a7e871568ed | [
"MIT"
] | null | null | null | # Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of which the sum ≥ s. If there isn't one, return 0 instead.
#
# Example:
#
# Input: s = 7, nums = [2,3,1,2,4,3]
# Output: 2
# Explanation: the subarray [4,3] has the minimal length under the problem constraint.
# Follow up:
# If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
if not nums or len(nums) == 0:
return 0
if len(nums) == 1:
return 1 if nums[0] >= s else 0
fast, slow = 0, 0
subsum, res = nums[0], len(nums) + 1
while slow <= fast < len(nums):
if subsum >= s:
res = min(fast - slow + 1, res)
subsum -= nums[slow]
slow += 1
else:
fast += 1
if fast >= len(nums):
break
subsum += nums[fast]
return res if res <= len(nums) else 0
s = Solution()
print(s.minSubArrayLen(100, [2, 3, 1, 2, 4, 3]))
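# Worked example from the problem statement above: the shortest subarray of
# [2,3,1,2,4,3] with sum >= 7 is [4,3], so the expected output is 2.
print(s.minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))  # prints 2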
| 31.923077 | 174 | 0.533333 |
4a26c8d1c7e68ea2366f00c6869f1d0e1b77976c | 107 | py | Python | mkdocs/utils/filters.py | davidhrbac/mkdocs | 3c8a1fccca29272ce327e89c398a55771a7f5635 | [
"BSD-2-Clause"
] | 57 | 2016-09-28T01:19:35.000Z | 2022-01-07T13:59:21.000Z | mkdocs/utils/filters.py | hufyhang/mkdocs | 4c4ef7fa7224713e17d479742c2df1b2fc78edcb | [
"BSD-2-Clause"
] | 16 | 2017-02-06T15:48:03.000Z | 2018-02-28T21:40:10.000Z | mkdocs/utils/filters.py | hufyhang/mkdocs | 4c4ef7fa7224713e17d479742c2df1b2fc78edcb | [
"BSD-2-Clause"
] | 81 | 2016-09-06T04:21:06.000Z | 2022-03-10T06:32:45.000Z | import json
import jinja2
def tojson(obj, **kwargs):
return jinja2.Markup(json.dumps(obj, **kwargs))
| 15.285714 | 51 | 0.71028 |
4a26c9bd57bfe5b46598f4f2f47063acc1f2eb86 | 10,579 | py | Python | altimeter/core/graph/link/links.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | altimeter/core/graph/link/links.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | altimeter/core/graph/link/links.py | AmOr1984v02/altimeter | 4adcf8d759b1f3f615b00521cc1756c8007e04f3 | [
"MIT"
] | null | null | null | """A Link represents the predicate-object portion of a triple."""
import uuid
from typing import Any, Dict, Type, List, Union
from rdflib import BNode, Graph, Literal, Namespace, RDF, URIRef, XSD
from altimeter.core.graph.exceptions import LinkParseException
from altimeter.core.graph.link.base import Link
from altimeter.core.graph.node_cache import NodeCache
class SimpleLink(Link):
"""A SimpleLink represents a scalar value. In RDF terms a SimpleLink creates a Literal
in the graph."""
field_type = "simple"
def to_rdf(
self, subj: Union[BNode, URIRef], namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode/URIRef in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
datatype = None
if isinstance(self.obj, int):
if self.obj > 2147483647:
datatype = XSD.nonNegativeInteger
literal = Literal(self.obj, datatype=datatype)
graph.add((subj, getattr(namespace, self.pred), literal))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            parent (Dict): the parent dictionary vertex
            vertices: the list of all vertex dictionaries
            edges: the list of all edge dictionaries
            prefix: the prefix assigned to the key
"""
if isinstance(self.obj, int):
# Need to handle numbers that are bigger than a Long in Java, for now we stringify it
if self.obj > 9223372036854775807 or self.obj < -9223372036854775807:
self.obj = str(self.obj)
        elif isinstance(self.obj, SimpleLink):
            # A nested SimpleLink is not a valid scalar value for an LPG property.
            print("ERROR: SimpleLink.to_lpg received a SimpleLink as its object")
parent[prefix + self.pred] = self.obj
class MultiLink(Link):
    """Represents a named set of sublinks. For example an 'EBSVolumeAttachment'
    MultiLink could exist which specifies sublinks Volume, AttachTime"""
field_type = "multi"
def to_dict(self) -> Dict[str, Any]:
"""Return a dictionary representation of this Link
Returns:
Dict representation of this Link
"""
return {
"pred": self.pred,
"obj": [field.to_dict() for field in self.obj],
"type": self.field_type,
}
def to_rdf(
self, subj: Union[BNode, URIRef], namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode/URIRef in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
map_node = BNode()
graph.add((map_node, RDF.type, getattr(namespace, f"{self.pred}")))
for field in self.obj:
field.to_rdf(map_node, namespace, graph, node_cache)
graph.add((subj, getattr(namespace, self.pred), map_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: A string to prefix the property name with
"""
for field in self.obj:
field.to_lpg(parent, vertices, edges, prefix=self.pred + ".")
@classmethod
def from_dict(cls: Type["MultiLink"], pred: str, obj: Any) -> "MultiLink":
"""Create a MultiLink object from a dict
Args:
pred: Link predicate
obj: Link object, in the case of a MultiLink this is a list of Fields
Returns:
Link subclass object
"""
fields = [link_from_dict(field) for field in obj]
return cls(pred=pred, obj=fields)
class ResourceLinkLink(Link):
"""Represents a link to another resource which must exist in the graph."""
field_type = "resource_link"
def to_rdf(
self, subj: Union[BNode, URIRef], namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode/URIRef in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
link_node = node_cache.setdefault(self.obj, URIRef(self.obj))
graph.add((subj, getattr(namespace, self.pred), link_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
edge = {
"~id": uuid.uuid1(),
"~label": self.field_type,
"~from": parent["~id"],
"~to": self.obj,
}
edges.append(edge)
class TransientResourceLinkLink(Link):
"""Represents a link to another resource which may or may not exist in the graph."""
field_type = "transient_resource_link"
def to_rdf(
self, subj: Union[BNode, URIRef], namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode/URIRef in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
link_node = node_cache.setdefault(self.obj, URIRef(self.obj))
graph.add((subj, getattr(namespace, self.pred), link_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
edge = {
"~id": uuid.uuid1(),
"~label": self.field_type,
"~from": parent["~id"],
"~to": self.obj,
}
edges.append(edge)
class TagLink(Link):
"""Represents a AWS-style Tag attached to a node."""
field_type = "tag"
def to_rdf(
self, subj: URIRef, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a URIRef in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
tag_id = f"{self.pred}:{self.obj}"
tag_node = node_cache.get(tag_id)
if tag_node is None:
tag_node = BNode()
graph.add((tag_node, namespace.key, Literal(self.pred)))
graph.add((tag_node, namespace.value, Literal(self.obj)))
graph.add((tag_node, RDF.type, getattr(namespace, "tag")))
node_cache[tag_id] = tag_node
graph.add((subj, getattr(namespace, "tag"), tag_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
if not any(x["~id"] == f"{self.pred}:{self.obj}" for x in vertices):
vertex = {}
vertex["~id"] = f"{self.pred}:{self.obj}"
vertex["~label"] = self.field_type
vertex[self.pred] = self.obj
vertices.append(vertex)
edge = {
"~id": uuid.uuid1(),
"~label": "tagged",
"~from": parent["~id"],
"~to": f"{self.pred}:{self.obj}",
}
edges.append(edge)
def link_from_dict(data: Dict[str, Any]) -> Link:
"""Create and return a Link subclass object from dict data.
Args:
data: data to generate a Link from
Returns:
object of the appropriate Link subclass
"""
field_type = data.get("type")
if field_type is None:
raise LinkParseException(f"key 'type' not found in {data}")
pred = data.get("pred")
if pred is None:
raise LinkParseException(f"key 'pred' not found in {data}")
obj = data.get("obj")
if field_type == "transient_resource_link":
return TransientResourceLinkLink.from_dict(pred, obj)
if obj is None:
raise LinkParseException(f"key 'obj' not found in {data}")
if field_type == "simple":
return SimpleLink.from_dict(pred, obj)
if field_type == "multi":
return MultiLink.from_dict(pred, obj)
if field_type == "resource_link":
return ResourceLinkLink.from_dict(pred, obj)
if field_type == "tag":
return TagLink.from_dict(pred, obj)
    raise LinkParseException(f"Unknown field type '{field_type}'")
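# A minimal usage sketch of the factory above (illustrative only; the ids, predicates and
# values are made up). It assumes the imports at the top of this module are available and
# that NodeCache behaves like a dict keyed by node id, as its use in to_rdf() suggests.
if __name__ == "__main__":
    namespace = Namespace("alti:aws:")
    graph = Graph()
    node_cache = NodeCache()
    subj = URIRef("alti:aws:ec2:volume/vol-123")
    link_dicts = [
        {"type": "simple", "pred": "size_gb", "obj": 100},
        {"type": "tag", "pred": "env", "obj": "prod"},
    ]
    for link_dict in link_dicts:
        link = link_from_dict(link_dict)
        link.to_rdf(subj, namespace, graph, node_cache)
    for triple in graph:
        print(triple)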
| 36.98951 | 99 | 0.604594 |
4a26c9d857b1b26486895b298c840056c3b26b9e | 250 | py | Python | tests/atgtasks_test.py | seba-1511/atg | 8f0b135d1a32c613f6726c7b28d165b8fd63c94c | [
"MIT"
] | null | null | null | tests/atgtasks_test.py | seba-1511/atg | 8f0b135d1a32c613f6726c7b28d165b8fd63c94c | [
"MIT"
] | null | null | null | tests/atgtasks_test.py | seba-1511/atg | 8f0b135d1a32c613f6726c7b28d165b8fd63c94c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
class UtilTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_myfunctionality(self):
pass
if __name__ == '__main__':
unittest.main()
| 12.5 | 35 | 0.624 |
4a26ca796ab709e4845e05b9e1f57252efb98bbd | 468 | py | Python | codewars/7kyu/evenOrOdd.py | jglez/code-solutions | 92e9f8f249e238f94e01db71663eac4c82b1a0ef | [
"MIT"
] | null | null | null | codewars/7kyu/evenOrOdd.py | jglez/code-solutions | 92e9f8f249e238f94e01db71663eac4c82b1a0ef | [
"MIT"
] | null | null | null | codewars/7kyu/evenOrOdd.py | jglez/code-solutions | 92e9f8f249e238f94e01db71663eac4c82b1a0ef | [
"MIT"
] | null | null | null | """
Task:
Given a list of integers, determine whether the sum of its elements is odd or even.
Give your answer as a string matching "odd" or "even".
If the input array is empty consider it as: [0] (array with a zero).
Examples:
Input: [0]
Output: "even"
Input: [0, 1, 4]
Output: "odd"
Input: [0, -1, -5]
Output: "even"
Have fun!
"""
def odd_or_even(arr):
if sum(arr) % 2 == 0:
return "even"
return "odd"
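# Quick illustrative checks mirroring the docstring examples; note sum([]) is 0, so the
# empty-list case naturally falls into the "even" branch without special handling.
if __name__ == '__main__':
    print(odd_or_even([0]))          # even
    print(odd_or_even([0, 1, 4]))    # odd
    print(odd_or_even([0, -1, -5]))  # even
    print(odd_or_even([]))           # even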
| 18.72 | 87 | 0.57906 |
4a26cb5b4f29be2ed89f6009b7b32310da4aa22a | 19,985 | py | Python | pysnmp-with-texts/SNMPEXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/SNMPEXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/SNMPEXT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module SNMPEXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SNMPEXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:08:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
snmpExt, = mibBuilder.importSymbols("APENT-MIB", "snmpExt")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, MibIdentifier, ModuleIdentity, Unsigned32, ObjectIdentity, NotificationType, TimeTicks, IpAddress, iso, Bits, Integer32, Counter32, Counter64, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "MibIdentifier", "ModuleIdentity", "Unsigned32", "ObjectIdentity", "NotificationType", "TimeTicks", "IpAddress", "iso", "Bits", "Integer32", "Counter32", "Counter64", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
apSnmpExtMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2467, 1, 22, 1))
if mibBuilder.loadTexts: apSnmpExtMib.setLastUpdated('9707202000Z')
if mibBuilder.loadTexts: apSnmpExtMib.setOrganization('ArrowPoint Communications Inc.')
if mibBuilder.loadTexts: apSnmpExtMib.setContactInfo(' Steven Colby Postal: ArrowPoint Communications Inc. 50 Nagog Park Acton, Massachusetts 01720 Tel: +1 978-206-3000 option 1 E-Mail: [email protected]')
if mibBuilder.loadTexts: apSnmpExtMib.setDescription('')
apSnmpExtTrapGeneric = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("enable", 1), ("disable", 0)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtTrapGeneric.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapGeneric.setDescription('This object controls the state of Generic trap generation. ArrowPoint supports the following generic traps: 1) Cold start, 2) Warm start, 3) Link down, 4) Link up, 5) SNMP authentication failure. SNMP authentication failure traps can be individually disallowed using the snmpEnableAuthenTraps object.')
apSnmpExtTrapEnterprise = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("enable", 1), ("disable", 0)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtTrapEnterprise.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapEnterprise.setDescription('This object controls the state of Enterprise trap generation. RMON event traps are treated as enterprise traps.')
apSnmpExtCommunityTable = MibTable((1, 3, 6, 1, 4, 1, 2467, 1, 22, 4), )
if mibBuilder.loadTexts: apSnmpExtCommunityTable.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtCommunityTable.setDescription('A table of SNMP community information used to determine whether read or write access is permitted to the MIBs. A maximum of 5 concurrent communities are supported.')
apSnmpExtCommunityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2467, 1, 22, 4, 1), ).setIndexNames((0, "SNMPEXT-MIB", "apSnmpExtCommunityName"))
if mibBuilder.loadTexts: apSnmpExtCommunityEntry.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtCommunityEntry.setDescription('An SNMP community entry describing one community name')
apSnmpExtCommunityName = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtCommunityName.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtCommunityName.setDescription('A community name used for SNMP access to this system')
apSnmpExtCommunityType = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("ro", 0), ("rw", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtCommunityType.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtCommunityType.setDescription('The privilge level of the associated community name when accessing this system. A community level of <ro> allows read-only access, a community level of <rw> allows read and write access.')
apSnmpExtCommunityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtCommunityStatus.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtCommunityStatus.setDescription('Status entry for this row ')
apSnmpExtTrapTable = MibTable((1, 3, 6, 1, 4, 1, 2467, 1, 22, 5), )
if mibBuilder.loadTexts: apSnmpExtTrapTable.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapTable.setDescription('')
apSnmpExtTrapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2467, 1, 22, 5, 1), ).setIndexNames((0, "SNMPEXT-MIB", "apSnmpExtTrapIp"))
if mibBuilder.loadTexts: apSnmpExtTrapEntry.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapEntry.setDescription('This table contains the configured trap hosts. All entries in this table will receive SNMP traps when generated. A maximum of 5 concurrent trap hosts are supported.')
apSnmpExtTrapIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 5, 1, 1), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtTrapIp.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapIp.setDescription('An IP Address of a host configured to receive traps')
apSnmpExtTrapCommunity = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 5, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtTrapCommunity.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapCommunity.setDescription('The community name to use when sending traps to this device')
apSnmpExtTrapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2467, 1, 22, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: apSnmpExtTrapStatus.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtTrapStatus.setDescription('Status entry for this row ')
apSnmpExtReloadConfigVal = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtReloadConfigVal.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtReloadConfigVal.setDescription('This object is used to control apSnmpExtReloadSet, which provides a SNMP based reboot capability. When this object is set to 0, SNMP based reboot is not allowed. When this object is set between 1 - (2^32) - 2, the equivalent value must be written to apSnmpExtReloadSet to cause a reboot. When this object is set to (2^32) -1, a reboot may be caused with any write value to apSnmpExtReloadSet. For security purposes this object always returns 0 on read')
apSnmpExtReloadSet = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtReloadSet.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtReloadSet.setDescription('This object provides SNMP based reboot capabilities. The required set value to effect a reboot is explained in apSnmpExtReloadConfigVal.')
apSnmpExtServiceTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtServiceTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtServiceTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with service transition. See the trap definition in the svcExt.mib name apSvcTransitionTrap.')
apSnmpExtLoginFailTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtLoginFailTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtLoginFailTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with FTP/Console/Telnet authentication failure.')
apSnmpExtReloadTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtReloadTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtReloadTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident a system reload')
apSnmpExtLastTrap = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: apSnmpExtLastTrap.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtLastTrap.setDescription('This object contains a string which details the last trap which was generated in the system')
apSnmpExtRedundancyTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtRedundancyTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtRedundancyTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps on Redundancy state changes.')
apSnmpExtForceDumpConfigVal = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 13), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtForceDumpConfigVal.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtForceDumpConfigVal.setDescription('This object is used to control apSnmpExtForceDump, which provides a SNMP based dump capability. When this object is set to 0, SNMP based dumping is not allowed. When this object is set between 1 - (2^32) - 2, the equivalent value must be written to apSnmpExtForceDump to cause a dump. When this object is set to (2^32) -1, a dump may be caused with any write value to apSnmpExtReloadSet. For security purposes this object always returns 0 on read.')
apSnmpExtForceDumpSlot = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 14), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtForceDumpSlot.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtForceDumpSlot.setDescription('This object provides SNMP based dumping capabilities on the CS800. This is the slot number of the module to force to dump and then reload. On the cs100 this object is not used. For security purposes this object always returns a 0.')
apSnmpExtForceDumpSubSlot = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtForceDumpSubSlot.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtForceDumpSubSlot.setDescription('This object provides SNMP based dumping capabilities on the CS800. This is the sub-slot number of the module to force to dump and then reload. On the cs100 this object is not used. For security purposes this object always returns a 0.')
apSnmpExtForceDump = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtForceDump.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtForceDump.setDescription('This object provides SNMP based dumping capabilities. Setting this value to non-zero will force a dump and reload the unit. On a cs800 the apSnmpExtForceDumpSlot and apSnmapExtForceDumpSubSlot objects must be set to valid values or the setting of this object is ignored. For security purposes this object always returns a 0. The required set value to effect a reboot is explained in apSnmpExtForceDumpConfigVal.')
apSnmpExtDosSynTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosSynTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosSynTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) SYN attacks. See the trap definition in the flowMgrExt.mib name apFlowMgrExtDosSynTrap.')
apSnmpExtDosLandTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosLandTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosLandTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) LAND attacks. See the trap definition in the flowMgrExt.mib name apFlowMgrExtDosLandTrap.')
apSnmpExtDosIllegalTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosIllegalTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosIllegalTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) Illegal packet format attacks. See the trap definition in the flowMgrExt.mib name apFlowMgrExtDosIllegalTrap.')
apSnmpExtDosPingTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosPingTraps.setStatus('obsolete')
if mibBuilder.loadTexts: apSnmpExtDosPingTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) Ping of Death attacks. See the trap definition in the flowMgrExt.mib name apFlowMgrExtDosPingTrap.')
apSnmpExtDosSmurfTraps = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosSmurfTraps.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosSmurfTraps.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) Smurf attacks. See the trap definition in the flowMgrExt.mib name apFlowMgrExtDosSmurfTrap.')
apSnmpExtDosSynTrapThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosSynTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosSynTrapThreshold.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) SYN attacks. It is the threshold over which a trap will be generated.')
apSnmpExtDosLandTrapThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosLandTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosLandTrapThreshold.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) LAND attacks. It is the threshold over which a trap will be generated.')
apSnmpExtDosIllegalTrapThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosIllegalTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosIllegalTrapThreshold.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) attacks due to invalid packets. It is the threshold over which a trap will be generated.')
apSnmpExtDosPingTrapThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosPingTrapThreshold.setStatus('obsolete')
if mibBuilder.loadTexts: apSnmpExtDosPingTrapThreshold.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) Ping of Death attacks. It is the threshold over which a trap will be generated.')
apSnmpExtDosSmurfTrapThreshold = MibScalar((1, 3, 6, 1, 4, 1, 2467, 1, 22, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: apSnmpExtDosSmurfTrapThreshold.setStatus('current')
if mibBuilder.loadTexts: apSnmpExtDosSmurfTrapThreshold.setDescription('This object controls the generation of ArrowPoint enterprise traps conincident with Denial of Service (DOS) Smurf attacks. It is the threshold over which a trap will be generated.')
apSnmpExtReloadTrap = NotificationType((1, 3, 6, 1, 4, 1, 2467, 1, 22) + (0,1))
if mibBuilder.loadTexts: apSnmpExtReloadTrap.setDescription('This trap is generated when a reboot of the system is performed.')
mibBuilder.exportSymbols("SNMPEXT-MIB", apSnmpExtLoginFailTraps=apSnmpExtLoginFailTraps, apSnmpExtTrapEntry=apSnmpExtTrapEntry, apSnmpExtDosSynTraps=apSnmpExtDosSynTraps, apSnmpExtForceDump=apSnmpExtForceDump, apSnmpExtDosSynTrapThreshold=apSnmpExtDosSynTrapThreshold, apSnmpExtDosLandTrapThreshold=apSnmpExtDosLandTrapThreshold, apSnmpExtReloadSet=apSnmpExtReloadSet, apSnmpExtTrapTable=apSnmpExtTrapTable, apSnmpExtReloadConfigVal=apSnmpExtReloadConfigVal, apSnmpExtForceDumpConfigVal=apSnmpExtForceDumpConfigVal, apSnmpExtForceDumpSubSlot=apSnmpExtForceDumpSubSlot, apSnmpExtDosPingTraps=apSnmpExtDosPingTraps, apSnmpExtReloadTrap=apSnmpExtReloadTrap, apSnmpExtDosSmurfTrapThreshold=apSnmpExtDosSmurfTrapThreshold, apSnmpExtCommunityType=apSnmpExtCommunityType, apSnmpExtDosPingTrapThreshold=apSnmpExtDosPingTrapThreshold, apSnmpExtMib=apSnmpExtMib, apSnmpExtCommunityEntry=apSnmpExtCommunityEntry, apSnmpExtLastTrap=apSnmpExtLastTrap, apSnmpExtDosIllegalTrapThreshold=apSnmpExtDosIllegalTrapThreshold, apSnmpExtDosLandTraps=apSnmpExtDosLandTraps, apSnmpExtTrapStatus=apSnmpExtTrapStatus, apSnmpExtReloadTraps=apSnmpExtReloadTraps, apSnmpExtDosIllegalTraps=apSnmpExtDosIllegalTraps, PYSNMP_MODULE_ID=apSnmpExtMib, apSnmpExtCommunityName=apSnmpExtCommunityName, apSnmpExtTrapCommunity=apSnmpExtTrapCommunity, apSnmpExtTrapGeneric=apSnmpExtTrapGeneric, apSnmpExtForceDumpSlot=apSnmpExtForceDumpSlot, apSnmpExtTrapEnterprise=apSnmpExtTrapEnterprise, apSnmpExtDosSmurfTraps=apSnmpExtDosSmurfTraps, apSnmpExtCommunityTable=apSnmpExtCommunityTable, apSnmpExtRedundancyTraps=apSnmpExtRedundancyTraps, apSnmpExtTrapIp=apSnmpExtTrapIp, apSnmpExtCommunityStatus=apSnmpExtCommunityStatus, apSnmpExtServiceTraps=apSnmpExtServiceTraps)
| 163.811475 | 1,725 | 0.805154 |
4a26cb76736439c5177b862367a683486d414ac1 | 3,425 | py | Python | examples/submission/fleur_specific_tests/test_CuBulk_fleur.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | 1 | 2017-02-07T12:31:38.000Z | 2017-02-07T12:31:38.000Z | examples/submission/fleur_specific_tests/test_CuBulk_fleur.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | 16 | 2017-04-03T11:42:50.000Z | 2017-05-18T16:25:39.000Z | examples/submission/fleur_specific_tests/test_CuBulk_fleur.py | broeder-j/aiida_fleur_plugin | cca54b194f4b217abb69aaa1fca0db52c6c830c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
__copyright__ = (u'Copyright (c), 2016, Forschungszentrum Jülich GmbH, '
'IAS-1/PGI-1, Germany. All rights reserved.')
__license__ = 'MIT license, see LICENSE.txt file'
__version__ = '0.27'
__contributors__ = 'Jens Broeder'
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
import sys
import os
from aiida.common.example_helpers import test_and_get_code
from aiida.plugins import DataFactory
# If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not
# invoke the mpirun command in the submission script)
run_in_serial_mode = True #False
################################################################
ParameterData = DataFactory('parameter')
StructureData = DataFactory('structure')
FleurinpData = DataFactory('fleur.fleurinp')
try:
dontsend = sys.argv[1]
if dontsend == '--dont-send':
submit_test = True
elif dontsend == '--send':
submit_test = False
else:
raise IndexError
except IndexError:
print(('The first parameter can only be either '
'--send or --dont-send'), file=sys.stderr)
sys.exit(1)
try:
codename = sys.argv[2]
except IndexError:
codename = None
queue = None
# queue = "th1_small"
settings = None
#####
code = test_and_get_code(codename, expected_code_type='fleur_inp.fleur')
#TODO: how to make smart path?
# get where tests folder is, then relative path
inpxmlfile = '/usr/users/iff_th1/broeder/aiida/github/aiida_fleur_plugin/tests/inp_xml_files/CuBulkXML/files/inp.xml'
enpara = '/usr/users/iff_th1/broeder/aiida/github/aiida_fleur_plugin/tests/inp_xml_files/CuBulkXML/files/enpara'
fleurinp = FleurinpData(files=[inpxmlfile, enpara])
print(fleurinp.files)
## For remote codes, it is not necessary to manually set the computer,
## since it is set automatically by new_calc
#computer = code.get_remote_computer()
#calc = code.new_calc(computer=computer)
calc = code.new_calc()
calc.label = 'Cu Bulk Fleur test'
calc.description = ('Simple test of Fleur with XML input with one step:'
                    ' 1. Generate a starting density and run 1 iteration '
'and compare fermi-energy & total energy')
calc.set_max_wallclock_seconds(5 * 60) # 5 min
# Valid only for Slurm and PBS (using default values for the
# number_cpus_per_machine), change for SGE-like schedulers
calc.set_resources({'num_machines': 1})
if run_in_serial_mode:
calc.set_withmpi(False)
## Otherwise, to specify a given # of cpus per machine, uncomment the following:
# calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 8})
#calc.set_custom_scheduler_commands("#SBATCH --account=ch3")
if queue is not None:
calc.set_queue_name(queue)
calc.use_fleurinpdata(fleurinp)
#calc.use_code(code)
if settings is not None:
calc.use_settings(settings)
if submit_test:
subfolder, script_filename = calc.submit_test()
print(f"Test_submit for calculation (uuid='{calc.uuid}')")
print(f'Submit file in {os.path.join(os.path.relpath(subfolder.abspath), script_filename)}')
else:
calc.store_all()
print(f"created calculation; calc=Calculation(uuid='{calc.uuid}') # ID={calc.dbnode.pk}")
calc.submit()
print(f"submitted calculation; calc=Calculation(uuid='{calc.uuid}') # ID={calc.dbnode.pk}")
| 33.252427 | 117 | 0.714161 |
4a26cbf032c98380663861230020455fdc8e0ad1 | 950 | py | Python | server/app/utils/misc.py | SheetWithoutShit/sws-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | null | null | null | server/app/utils/misc.py | SheetWithoutShit/sws-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | 57 | 2020-08-01T14:58:59.000Z | 2020-09-20T13:27:16.000Z | server/app/utils/misc.py | SpentlessInc/spentless-server | 16b7154188f08b33f84d88caea217673cf989b2b | [
"MIT"
] | null | null | null | """This module provides misc functions."""
import asyncio
import logging
from app.utils.errors import RetryError
LOGGER = logging.getLogger(__name__)
def retry(times, retry_interval=2):
"""Decorator that provides backoff retries."""
def func_wrapper(func):
"""Function wrapper."""
        async def wrapper(*args, **kwargs):
            """The main functionality of backoff retries lives here."""
for time in range(times):
try:
return await func(*args, **kwargs)
except RetryError:
await asyncio.sleep(retry_interval ** time)
LOGGER.error(
"%s(args: %s, kwargs: %s) ... Retrying, attempt #%s.",
func.__name__,
args,
kwargs,
time + 1
)
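            # Note: if every attempt raises RetryError, the loop ends and the wrapper
            # implicitly returns None instead of re-raising the last error.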
return wrapper
return func_wrapper
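# Minimal usage sketch (hypothetical coroutine, not part of this module): a function that
# raises RetryError on transient failures is retried for up to `times` attempts with a
# backoff of retry_interval ** attempt seconds (so the first backoff is always 1 second).
# It assumes RetryError accepts a message argument.
if __name__ == "__main__":
    attempts = {"count": 0}

    @retry(times=3, retry_interval=0.01)
    async def flaky():
        attempts["count"] += 1
        if attempts["count"] < 3:
            raise RetryError("transient failure")
        return "ok"

    print(asyncio.run(flaky()))  # "ok" after two retried failures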
| 28.787879 | 78 | 0.510526 |
4a26cd0ed194730ea747f0ef6622227fe6c84ff9 | 2,354 | py | Python | lnt/testing/profile/profilev1impl.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | null | null | null | lnt/testing/profile/profilev1impl.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | null | null | null | lnt/testing/profile/profilev1impl.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | null | null | null | from lnt.testing.profile.profile import ProfileImpl
import pickle
import zlib
class ProfileV1(ProfileImpl):
"""
    ProfileV1 files are not clever in any way. They are simple Python objects with
    the profile data laid out in the most obvious way for production/consumption
that are then pickled and compressed.
They are expected to be created by simply storing into the ``self.data``
member.
The ``self.data`` member has this format::
{
counters: {'cycles': 12345.0, 'branch-misses': 200.0}, # absolute values.
disassembly-format: 'raw',
functions: {
name: {
counters: {'cycles': 45.0, ...}, # Note counters are now percentages.
data: [
[463464, {'cycles': 23.0, ...}, '\tadd r0, r0, r1'}],
...
]
}
}
}
"""
def __init__(self, data):
"""
Create from a raw data dict. data has the format given in the class
docstring.
"""
self.data = data
@staticmethod
def upgrade(old):
raise RuntimeError("Cannot upgrade to version 1!")
@staticmethod
def checkFile(fn):
# "zlib compressed data" - 78 9C
with open(fn, 'rb') as f:
return f.read(2) == b'\x78\x9c'
@staticmethod
def deserialize(fobj):
o = zlib.decompress(fobj.read())
data = pickle.loads(o)
return ProfileV1(data)
def serialize(self, fname=None):
obj = pickle.dumps(self.data)
compressed_obj = zlib.compress(obj)
if fname is None:
return bytes(compressed_obj)
else:
with open(fname, 'wb') as fd:
fd.write(compressed_obj)
def getVersion(self):
return 1
def getTopLevelCounters(self):
return self.data['counters']
def getDisassemblyFormat(self):
if 'disassembly-format' in self.data:
return self.data['disassembly-format']
return 'raw'
def getFunctions(self):
d = {}
for fn in self.data['functions']:
f = self.data['functions'][fn]
d[fn] = dict(counters=f.get('counters', {}),
length=len(f.get('data', [])))
return d
def getCodeForFunction(self, fname):
for inst_info in self.data['functions'][fname].get('data', []):
yield (inst_info[0], inst_info[1], inst_info[2])
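# A minimal round-trip sketch following the data layout documented in the class docstring
# (the counter values and function name below are made up for illustration).
if __name__ == '__main__':
    import io

    example_data = {
        'counters': {'cycles': 12345.0},
        'disassembly-format': 'raw',
        'functions': {
            'main': {
                'counters': {'cycles': 100.0},
                'data': [[4096, {'cycles': 40.0}, '\tadd r0, r0, r1']],
            }
        },
    }
    profile = ProfileV1(example_data)
    blob = profile.serialize()
    restored = ProfileV1.deserialize(io.BytesIO(blob))
    print(restored.getTopLevelCounters())            # {'cycles': 12345.0}
    print(restored.getFunctions())                   # {'main': {'counters': ..., 'length': 1}}
    print(list(restored.getCodeForFunction('main')))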
| 27.057471 | 77 | 0.584537 |
4a26cd7230b52b52759159c2aeac729e2261aebf | 2,674 | py | Python | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 1 | 2019-03-07T19:58:45.000Z | 2019-03-07T19:58:45.000Z | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 9 | 2017-10-04T15:08:53.000Z | 2021-02-02T21:51:41.000Z | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | null | null | null | """
This module is not meant to be imported directly.
Its purpose is to patch the TohuBaseGenerator class
so that its special methods __add__, __mul__ etc.
support other generators as arguments.
"""
from operator import add, mul, gt, ge, lt, le, eq
from .base import TohuBaseGenerator
from .primitive_generators import GeoJSONGeolocation, SelectOnePrimitive, Timestamp, as_tohu_generator
from .derived_generators import Apply, GetAttribute, SelectOneDerived
from .utils import identity
__all__ = []
def add_generators(self, other):
return Apply(add, self, as_tohu_generator(other))
def radd_generators(self, other):
return Apply(add, as_tohu_generator(other), self)
def mul_generators(self, other):
return Apply(mul, self, as_tohu_generator(other))
def rmul_generators(self, other):
return Apply(mul, as_tohu_generator(other), self)
def eq_generators(self, other):
return Apply(eq, self, as_tohu_generator(other))
def lt_generators(self, other):
return Apply(lt, self, as_tohu_generator(other))
def le_generators(self, other):
return Apply(le, self, as_tohu_generator(other))
def gt_generators(self, other):
return Apply(gt, self, as_tohu_generator(other))
def ge_generators(self, other):
return Apply(ge, self, as_tohu_generator(other))
# Patch TohuBaseGenerator with the new methods
TohuBaseGenerator.__add__ = add_generators
TohuBaseGenerator.__radd__ = radd_generators
TohuBaseGenerator.__mul__ = mul_generators
TohuBaseGenerator.__rmul__ = rmul_generators
TohuBaseGenerator.__eq__ = eq_generators
TohuBaseGenerator.__lt__ = lt_generators
TohuBaseGenerator.__le__ = le_generators
TohuBaseGenerator.__gt__ = gt_generators
TohuBaseGenerator.__ge__ = ge_generators
def getattribute_generators(self, name):
if name == '__isabstractmethod__':
# Special case which is needed because TohuUltraBaseMeta is
# derived from ABCMeta and it uses '__isabstractmethod__'
# to check for abstract methods.
#
# TODO: This check should probably be moved to TohuUltraBaseGenerator somewhere.
return
if name == '_ipython_canary_method_should_not_exist_':
# Special case which is needed because IPython uses this attribute internally.
raise NotImplementedError("Special case needed for IPython")
return GetAttribute(self, name)
SelectOnePrimitive.__getattr__ = getattribute_generators
SelectOneDerived.__getattr__ = getattribute_generators
def split_geolocation(self):
attributes = ['lon', 'lat'] + self.include_attributes
return tuple(GetAttribute(self, attr_name) for attr_name in attributes)
GeoJSONGeolocation.split = split_geolocation
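# A minimal illustration of the patched operators (assumes as_tohu_generator wraps plain
# values in constant generators, as its use above implies; the values are arbitrary).
if __name__ == '__main__':
    g = as_tohu_generator(2) + as_tohu_generator(3)
    h = as_tohu_generator(10) >= as_tohu_generator(4)
    # Arithmetic/comparison on generators yields derived Apply generators, not plain values.
    print(type(g).__name__)  # Apply
    print(type(h).__name__)  # Apply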
| 29.384615 | 102 | 0.775243 |
4a26cf0eadec9d8cb44f64b3871c597d4ac9a175 | 15,865 | py | Python | modules/flow0d/flow0d.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | 3 | 2021-03-22T14:17:09.000Z | 2021-05-03T15:24:09.000Z | modules/flow0d/flow0d.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | null | null | null | modules/flow0d/flow0d.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | 2 | 2021-03-29T10:52:09.000Z | 2021-11-26T15:56:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2022, Dr.-Ing. Marc Hirschvogel
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time, sys
import numpy as np
from petsc4py import PETSc
import timeintegration
import utilities
import solver_nonlin
from base import problem_base
# framework of 0D flow models, relating pressure p (and its derivative) to fluxes q
class Flow0DProblem(problem_base):
def __init__(self, io_params, time_params, model_params, time_curves, coupling_params={}, comm=None):
problem_base.__init__(self, io_params, time_params, comm)
self.problem_physics = 'flow0d'
self.simname = io_params['simname']
self.time_params = time_params
# only relevant to syspul* models
try:
self.chamber_models = model_params['chamber_models']
if 'ao' not in self.chamber_models.keys(): self.chamber_models['ao'] = {'type' : '0D_rigid'} # add aortic root model
except: self.chamber_models = {}
try: self.coronary_model = model_params['coronary_model']
except: self.coronary_model = None
try: self.excitation_curve = model_params['excitation_curve']
except: self.excitation_curve = None
try: initial_file = time_params['initial_file']
except: initial_file = ''
# could use extra write frequency setting for 0D model (i.e. for coupled problem)
try: self.write_results_every_0D = io_params['write_results_every_0D']
except: self.write_results_every_0D = io_params['write_results_every']
# for restart
try: self.write_restart_every = io_params['write_restart_every']
except: self.write_restart_every = -1
# could use extra output path setting for 0D model (i.e. for coupled problem)
try: self.output_path_0D = io_params['output_path_0D']
except: self.output_path_0D = io_params['output_path']
try: valvelaws = model_params['valvelaws']
except: valvelaws = {'av' : ['pwlin_pres',0], 'mv' : ['pwlin_pres',0], 'pv' : ['pwlin_pres',0], 'tv' : ['pwlin_pres',0]}
try: self.cq = coupling_params['coupling_quantity']
except: self.cq = ['volume']
try: self.vq = coupling_params['variable_quantity']
except: self.vq = ['pressure']
try: self.eps_periodic = time_params['eps_periodic']
except: self.eps_periodic = 1.0e-20
try: self.periodic_checktype = time_params['periodic_checktype']
except: self.periodic_checktype = 'allvar'
try: self.prescribed_variables = model_params['prescribed_variables']
except: self.prescribed_variables = {}
try: self.perturb_type = model_params['perturb_type'][0]
except: self.perturb_type = None
try: self.perturb_factor = model_params['perturb_type'][1]
except: self.perturb_factor = 1.
try: self.perturb_id = model_params['perturb_type'][2]
except: self.perturb_id = -1
try: self.perturb_after_cylce = model_params['perturb_after_cylce']
except: self.perturb_after_cylce = -1
# definitely set to -1 if we don't have a perturb type
if self.perturb_type is None: self.perturb_after_cylce = -1
self.have_induced_pert = False
# initialize 0D model class
if model_params['modeltype'] == '2elwindkessel':
from cardiovascular0D_2elwindkessel import cardiovascular0D2elwindkessel
self.cardvasc0D = cardiovascular0D2elwindkessel(model_params['parameters'], self.cq, self.vq, comm=self.comm)
elif model_params['modeltype'] == '4elwindkesselLsZ':
from cardiovascular0D_4elwindkesselLsZ import cardiovascular0D4elwindkesselLsZ
self.cardvasc0D = cardiovascular0D4elwindkesselLsZ(model_params['parameters'], self.cq, self.vq, comm=self.comm)
elif model_params['modeltype'] == '4elwindkesselLpZ':
from cardiovascular0D_4elwindkesselLpZ import cardiovascular0D4elwindkesselLpZ
self.cardvasc0D = cardiovascular0D4elwindkesselLpZ(model_params['parameters'], self.cq, self.vq, comm=self.comm)
elif model_params['modeltype'] == 'syspul':
from cardiovascular0D_syspul import cardiovascular0Dsyspul
self.cardvasc0D = cardiovascular0Dsyspul(model_params['parameters'], self.chamber_models, self.coronary_model, self.cq, self.vq, valvelaws=valvelaws, comm=self.comm)
elif model_params['modeltype'] == 'syspulcap':
from cardiovascular0D_syspulcap import cardiovascular0Dsyspulcap
self.cardvasc0D = cardiovascular0Dsyspulcap(model_params['parameters'], self.chamber_models, self.coronary_model, self.cq, self.vq, valvelaws=valvelaws, comm=self.comm)
elif model_params['modeltype'] == 'syspulcapcor':
from cardiovascular0D_syspulcap import cardiovascular0Dsyspulcapcor
self.cardvasc0D = cardiovascular0Dsyspulcapcor(model_params['parameters'], self.chamber_models, self.coronary_model, self.cq, self.vq, valvelaws=valvelaws, comm=self.comm)
elif model_params['modeltype'] == 'syspulcaprespir':
from cardiovascular0D_syspulcaprespir import cardiovascular0Dsyspulcaprespir
self.cardvasc0D = cardiovascular0Dsyspulcaprespir(model_params['parameters'], self.chamber_models, self.coronary_model, self.cq, self.vq, valvelaws=valvelaws, comm=self.comm)
else:
raise NameError("Unknown 0D modeltype!")
# vectors and matrices
self.dK = PETSc.Mat().createAIJ(size=(self.cardvasc0D.numdof,self.cardvasc0D.numdof), bsize=None, nnz=None, csr=None, comm=self.comm)
self.dK.setUp()
self.K = PETSc.Mat().createAIJ(size=(self.cardvasc0D.numdof,self.cardvasc0D.numdof), bsize=None, nnz=None, csr=None, comm=self.comm)
self.K.setUp()
self.s, self.s_old, self.s_mid = self.K.createVecLeft(), self.K.createVecLeft(), self.K.createVecLeft()
self.sTc, self.sTc_old = self.K.createVecLeft(), self.K.createVecLeft()
self.df, self.df_old = self.K.createVecLeft(), self.K.createVecLeft()
self.f, self.f_old = self.K.createVecLeft(), self.K.createVecLeft()
self.aux, self.aux_old, self.aux_mid = np.zeros(self.cardvasc0D.numdof), np.zeros(self.cardvasc0D.numdof), np.zeros(self.cardvasc0D.numdof)
        self.s_set = self.K.createVecLeft() # set point for multiscale analysis
self.c, self.y = [], []
# initialize flow0d time-integration class
self.ti = timeintegration.timeintegration_flow0d(time_params, time_curves, self.t_init, comm=self.comm)
if initial_file:
initialconditions = self.cardvasc0D.set_initial_from_file(initial_file)
else:
initialconditions = time_params['initial_conditions']
self.cardvasc0D.initialize(self.s, initialconditions)
self.cardvasc0D.initialize(self.s_old, initialconditions)
self.cardvasc0D.initialize(self.sTc_old, initialconditions)
self.theta_ost = time_params['theta_ost']
self.odemodel = self.cardvasc0D
def assemble_residual_stiffness(self):
K = PETSc.Mat().createAIJ(size=(self.cardvasc0D.numdof,self.cardvasc0D.numdof), bsize=None, nnz=None, csr=None, comm=self.comm)
K.setUp()
# 0D rhs vector: r = (df - df_old)/dt + theta * f + (1-theta) * f_old
r = K.createVecLeft()
r.axpy(1./self.dt, self.df)
r.axpy(-1./self.dt, self.df_old)
r.axpy(self.theta_ost, self.f)
r.axpy(1.-self.theta_ost, self.f_old)
self.dK.assemble()
self.K.assemble()
K.assemble()
K.axpy(1./self.dt, self.dK)
K.axpy(self.theta_ost, self.K)
return r, K
def writerestart(self, sname, N, ms=False):
self.cardvasc0D.write_restart(self.output_path_0D, sname+'_s', N, self.s)
self.cardvasc0D.write_restart(self.output_path_0D, sname+'_aux', N, self.aux)
self.cardvasc0D.write_restart(self.output_path_0D, sname+'_sTc_old', N, self.sTc_old)
if ms: self.cardvasc0D.write_restart(self.output_path_0D, sname+'_s_set', N, self.s_set)
if self.cardvasc0D.T_cycl > 0: # write heart cycle info
if self.comm.rank == 0:
filename = self.output_path_0D+'/checkpoint_'+sname+'_cycledata_'+str(N)+'.txt'
f = open(filename, 'wt')
f.write('%i %.8f' % (self.ti.cycle[0],self.ti.cycleerror[0]))
f.close()
def readrestart(self, sname, rst, ms=False):
self.cardvasc0D.read_restart(self.output_path_0D, sname+'_s', rst, self.s)
self.cardvasc0D.read_restart(self.output_path_0D, sname+'_s', rst, self.s_old)
self.cardvasc0D.read_restart(self.output_path_0D, sname+'_aux', rst, self.aux)
self.cardvasc0D.read_restart(self.output_path_0D, sname+'_aux', rst, self.aux_old)
self.cardvasc0D.read_restart(self.output_path_0D, sname+'_sTc_old', rst, self.sTc_old)
if ms: self.cardvasc0D.read_restart(self.output_path_0D, sname+'_s_set', rst, self.s_set)
if self.cardvasc0D.T_cycl > 0: # read heart cycle info
self.ti.cycle[0] = np.loadtxt(self.output_path_0D+'/checkpoint_'+sname+'_cycledata_'+str(rst)+'.txt', usecols=(0), dtype=int)
self.ti.cycleerror[0] = np.loadtxt(self.output_path_0D+'/checkpoint_'+sname+'_cycledata_'+str(rst)+'.txt', usecols=(1), dtype=float)
self.t_init -= (self.ti.cycle[0]-1) * self.cardvasc0D.T_cycl
def evaluate_activation(self, t):
# activation curves
if bool(self.chamber_models):
ci=0
for ch in ['lv','rv','la','ra']:
if self.chamber_models[ch]['type']=='0D_elast':
self.y[ci] = self.ti.timecurves(self.chamber_models[ch]['activation_curve'])(t)
ci+=1
if self.chamber_models[ch]['type']=='0D_elast_prescr':
self.y[ci] = self.ti.timecurves(self.chamber_models[ch]['elastance_curve'])(t)
ci+=1
def induce_perturbation(self):
if self.perturb_after_cylce > 0: # at least run through one healthy cycle
if self.ti.cycle[0] > self.perturb_after_cylce:
if self.comm.rank == 0:
print(">>> Induced cardiovascular disease type: %s" % (self.perturb_type))
sys.stdout.flush()
self.cardvasc0D.induce_perturbation(self.perturb_type, self.perturb_factor)
self.have_induced_pert = True
class Flow0DSolver():
def __init__(self, problem, solver_params):
self.pb = problem
self.solver_params = solver_params
# initialize nonlinear solver class
self.solnln = solver_nonlin.solver_nonlinear_ode(self.pb, self.solver_params)
def solve_problem(self):
start = time.time()
# print header
utilities.print_problem(self.pb.problem_type, self.pb.comm, self.pb.cardvasc0D.numdof)
# read restart information
if self.pb.restart_step > 0:
self.pb.readrestart(self.pb.simname, self.pb.restart_step)
self.pb.simname += '_r'+str(self.pb.restart_step)
# evaluate old state
if self.pb.excitation_curve is not None:
self.pb.c = []
self.pb.c.append(self.pb.ti.timecurves(self.pb.excitation_curve)(self.pb.t_init))
if bool(self.pb.chamber_models):
self.pb.y = []
for ch in ['lv','rv','la','ra']:
if self.pb.chamber_models[ch]['type']=='0D_elast': self.pb.y.append(self.pb.ti.timecurves(self.pb.chamber_models[ch]['activation_curve'])(self.pb.t_init))
if self.pb.chamber_models[ch]['type']=='0D_elast_prescr': self.pb.y.append(self.pb.ti.timecurves(self.pb.chamber_models[ch]['elastance_curve'])(self.pb.t_init))
if self.pb.chamber_models[ch]['type']=='0D_prescr': self.pb.c.append(self.pb.ti.timecurves(self.pb.chamber_models[ch]['prescribed_curve'])(self.pb.t_init))
self.pb.cardvasc0D.evaluate(self.pb.s_old, self.pb.t_init, self.pb.df_old, self.pb.f_old, None, None, self.pb.c, self.pb.y, self.pb.aux_old)
# flow 0d main time loop
for N in range(self.pb.restart_step+1, self.pb.numstep_stop+1):
wts = time.time()
# current time
t = N * self.pb.dt
# offset time for multiple cardiac cycles
t_off = (self.pb.ti.cycle[0]-1) * self.pb.cardvasc0D.T_cycl # zero if T_cycl variable is not specified
# external volume/flux from time curve
if self.pb.excitation_curve is not None:
self.pb.c[0] = self.pb.ti.timecurves(self.pb.excitation_curve)(t-t_off)
# activation curves
self.pb.evaluate_activation(t-t_off)
# solve
self.solnln.newton(self.pb.s, t-t_off)
# get midpoint dof values for post-processing (has to be called before update!)
self.pb.cardvasc0D.midpoint_avg(self.pb.s, self.pb.s_old, self.pb.s_mid, self.pb.theta_ost), self.pb.cardvasc0D.midpoint_avg(self.pb.aux, self.pb.aux_old, self.pb.aux_mid, self.pb.theta_ost)
# raw txt file output of 0D model quantities
if self.pb.write_results_every_0D > 0 and N % self.pb.write_results_every_0D == 0:
self.pb.cardvasc0D.write_output(self.pb.output_path_0D, t, self.pb.s_mid, self.pb.aux_mid, self.pb.simname)
# update timestep
self.pb.cardvasc0D.update(self.pb.s, self.pb.df, self.pb.f, self.pb.s_old, self.pb.df_old, self.pb.f_old, self.pb.aux, self.pb.aux_old)
# print to screen
self.pb.cardvasc0D.print_to_screen(self.pb.s_mid,self.pb.aux_mid)
# solve time for time step
wte = time.time()
wt = wte - wts
# print time step info to screen
self.pb.ti.print_timestep(N, t, self.solnln.sepstring, self.pb.numstep, wt=wt)
# check for periodicity in cardiac cycle and stop if reached (only for syspul* models - cycle counter gets updated here)
is_periodic = self.pb.cardvasc0D.cycle_check(self.pb.s, self.pb.sTc, self.pb.sTc_old, t-t_off, self.pb.ti.cycle, self.pb.ti.cycleerror, self.pb.eps_periodic, check=self.pb.periodic_checktype, inioutpath=self.pb.output_path_0D, nm=self.pb.simname, induce_pert_after_cycl=self.pb.perturb_after_cylce)
# induce some disease/perturbation for cardiac cycle (i.e. valve stenosis or leakage)
if self.pb.perturb_type is not None and not self.pb.have_induced_pert: self.pb.induce_perturbation()
# write 0D restart info - old and new quantities are the same at this stage (except cycle values sTc)
if self.pb.write_restart_every > 0 and N % self.pb.write_restart_every == 0:
self.pb.writerestart(self.pb.simname, N)
if is_periodic:
if self.pb.comm.rank == 0:
print("Periodicity reached after %i heart cycles with cycle error %.4f! Finished. :-)" % (self.pb.ti.cycle[0]-1,self.pb.ti.cycleerror[0]))
sys.stdout.flush()
break
if self.pb.comm.rank == 0: # only proc 0 should print this
print('Time for computation: %.4f s (= %.2f min)' % ( time.time()-start, (time.time()-start)/60. ))
sys.stdout.flush()
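# Illustrative standalone sketch (independent of the classes above): the residual assembled
# in Flow0DProblem.assemble_residual_stiffness discretizes a system of the form
#     d/dt[ df(s) ] + f(s) = 0
# with the one-step theta scheme r(s) = (df - df_old)/dt + theta*f + (1-theta)*f_old = 0 and
# tangent K_eff = (1/dt)*d(df)/ds + theta*d(f)/ds. For the scalar test equation
# dy/dt = -a*y (df = y, f = a*y) one step reduces to the closed form below.
def theta_step_scalar(y_old, a, dt, theta):
    # Solve (y - y_old)/dt + theta*a*y + (1-theta)*a*y_old = 0 for y.
    return y_old * (1.0 - dt * (1.0 - theta) * a) / (1.0 + dt * theta * a)

if __name__ == '__main__':
    y, a, dt, theta = 1.0, 2.0, 0.01, 0.5
    for _ in range(100):
        y = theta_step_scalar(y, a, dt, theta)
    print(y)  # close to exp(-2) ~ 0.1353 for theta = 0.5 (Crank-Nicolson)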
| 47.642643 | 310 | 0.645194 |
4a26cf16f44476d5a82c2d4f0ea0f567b46e241d | 13,427 | py | Python | lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_copy.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_copy
short_description: Copy data between a file/program and a PostgreSQL table
description:
- Copy data between a file/program and a PostgreSQL table.
options:
copy_to:
description:
- Copy the contents of a table to a file.
- Can also copy the results of a SELECT query.
- Mutually exclusive with I(copy_from) and I(dst).
type: path
aliases: [ to ]
copy_from:
description:
- Copy data from a file to a table (appending the data to whatever is in the table already).
- Mutually exclusive with I(copy_to) and I(src).
type: path
aliases: [ from ]
src:
description:
- Copy data from I(copy_from) to I(src=tablename).
- Used with I(copy_to) only.
type: str
aliases: [ source ]
dst:
description:
- Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
- Used with I(copy_from) only.
type: str
aliases: [ destination ]
columns:
description:
- List of column names for the src/dst table to COPY FROM/TO.
type: list
elements: str
aliases: [ column ]
program:
description:
- Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
- See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
type: bool
default: no
options:
description:
- Options of COPY command.
- See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
type: dict
db:
description:
- Name of database to connect to.
type: str
aliases: [ login_db ]
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(no), check whether values of parameters are potentially dangerous.
- It makes sense to use C(no) only when SQL injections are possible.
type: bool
default: yes
version_added: '0.2.0'
notes:
- Supports PostgreSQL version 9.4+.
- COPY command is only allowed to database superusers.
- If I(check_mode=yes), the module just checks the src/dst table availability
  and returns the COPY query that has not actually been executed.
- If I(check_mode=yes) and the source has been passed as SQL, the module
  will execute it and roll the transaction back, but pay attention:
  it can affect database performance (e.g., if the SQL collects a lot of data).
seealso:
- name: COPY command reference
description: Complete reference of the COPY command documentation.
link: https://www.postgresql.org/docs/current/sql-copy.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
community.postgresql.postgresql_copy:
copy_from: /tmp/data.txt
dst: acme
- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
community.postgresql.postgresql_copy:
copy_from: /tmp/data.csv
dst: acme
columns: id,name
options:
format: csv
- name: >
Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
The NULL values are specified as N
community.postgresql.postgresql_copy:
copy_from: /tmp/data.csv
dst: bar
options:
delimiter: '|'
null: 'N'
- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
community.postgresql.postgresql_copy:
src: acme
copy_to: /tmp/data.txt
- name: Copy data from SELECT query to/tmp/data.csv in CSV format
community.postgresql.postgresql_copy:
src: 'SELECT * FROM acme'
copy_to: /tmp/data.csv
options:
format: csv
- name: Copy CSV data from my_table to gzip
community.postgresql.postgresql_copy:
src: my_table
copy_to: 'gzip > /tmp/data.csv.gz'
program: yes
options:
format: csv
- name: >
Copy data from columns id, name of table bar to /tmp/data.txt.
Output format is text, vertical-bar-separated, NULL as N
community.postgresql.postgresql_copy:
src: bar
columns:
- id
- name
copy_to: /tmp/data.csv
options:
delimiter: '|'
null: 'N'
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: str
sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
src:
description: Data source.
returned: always
type: str
sample: "mytable"
dst:
description: Data destination.
returned: always
type: str
sample: "/tmp/data.csv"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
pg_quote_identifier,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
class PgCopyData(object):
"""Implements behavior of COPY FROM, COPY TO PostgreSQL command.
Arguments:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
Attributes:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
changed (bool) -- something was changed after execution or not
executed_queries (list) -- executed queries
dst (str) -- data destination table (when copy_from)
src (str) -- data source table (when copy_to)
opt_need_quotes (tuple) -- values of these options must be passed
to SQL in quotes
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.executed_queries = []
self.changed = False
self.dst = ''
self.src = ''
self.opt_need_quotes = (
'DELIMITER',
'NULL',
'QUOTE',
'ESCAPE',
'ENCODING',
)
def copy_from(self):
"""Implements COPY FROM command behavior."""
self.src = self.module.params['copy_from']
self.dst = self.module.params['dst']
query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
if self.module.params.get('columns'):
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
query_fragments.append('FROM')
if self.module.params.get('program'):
query_fragments.append('PROGRAM')
query_fragments.append("'%s'" % self.src)
if self.module.params.get('options'):
query_fragments.append(self.__transform_options())
# Note: check mode is implemented here:
if self.module.check_mode:
self.changed = self.__check_table(self.dst)
if self.changed:
self.executed_queries.append(' '.join(query_fragments))
else:
if exec_sql(self, ' '.join(query_fragments), return_bool=True):
self.changed = True
def copy_to(self):
"""Implements COPY TO command behavior."""
self.src = self.module.params['src']
self.dst = self.module.params['copy_to']
if 'SELECT ' in self.src.upper():
# If src is SQL SELECT statement:
query_fragments = ['COPY (%s)' % self.src]
else:
# If src is a table:
query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
if self.module.params.get('columns'):
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
query_fragments.append('TO')
if self.module.params.get('program'):
query_fragments.append('PROGRAM')
query_fragments.append("'%s'" % self.dst)
if self.module.params.get('options'):
query_fragments.append(self.__transform_options())
# Note: check mode is implemented here:
if self.module.check_mode:
self.changed = self.__check_table(self.src)
if self.changed:
self.executed_queries.append(' '.join(query_fragments))
else:
if exec_sql(self, ' '.join(query_fragments), return_bool=True):
self.changed = True
def __transform_options(self):
"""Transform options dict into a suitable string."""
for (key, val) in iteritems(self.module.params['options']):
if key.upper() in self.opt_need_quotes:
self.module.params['options'][key] = "'%s'" % val
opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
return '(%s)' % ', '.join(opt)
def __check_table(self, table):
"""Check table or SQL in transaction mode for check_mode.
Return True if it is OK.
Arguments:
table (str) - Table name that needs to be checked.
It can be SQL SELECT statement that was passed
instead of the table name.
"""
if 'SELECT ' in table.upper():
# In this case table is actually SQL SELECT statement.
# If SQL fails, it's handled by exec_sql():
exec_sql(self, table, add_to_executed=False)
# If exec_sql was passed, it means all is OK:
return True
exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
add_to_executed=False)
# If SQL was executed successfully:
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
copy_to=dict(type='path', aliases=['to']),
copy_from=dict(type='path', aliases=['from']),
src=dict(type='str', aliases=['source']),
dst=dict(type='str', aliases=['destination']),
columns=dict(type='list', elements='str', aliases=['column']),
options=dict(type='dict'),
program=dict(type='bool', default=False),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
trust_input=dict(type='bool', default=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['copy_from', 'copy_to'],
['copy_from', 'src'],
['copy_to', 'dst'],
]
)
if not module.params['trust_input']:
# Check input for potentially dangerous elements:
opt_list = None
if module.params['options']:
opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
check_input(module,
module.params['copy_to'],
module.params['copy_from'],
module.params['src'],
module.params['dst'],
opt_list,
module.params['columns'],
module.params['session_role'])
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
if module.params.get('copy_from') and not module.params.get('dst'):
module.fail_json(msg='dst param is necessary with copy_from')
elif module.params.get('copy_to') and not module.params.get('src'):
module.fail_json(msg='src param is necessary with copy_to')
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = PgCopyData(module, cursor)
# Note: parameters like dst, src, etc. are got
# from module object into data object of PgCopyData class.
# Therefore not need to pass args to the methods below.
# Note: check mode is implemented inside the methods below
# by checking passed module.check_mode arg.
if module.params.get('copy_to'):
data.copy_to()
elif module.params.get('copy_from'):
data.copy_from()
# Finish:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Return some values:
module.exit_json(
changed=data.changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
if __name__ == '__main__':
main()
| 31.893112 | 110 | 0.639011 |
4a26d0c8a60ccadafa610a32e97a2d93b6bed596 | 2,918 | py | Python | build_word2vec_model_v020.py | espoirMur/balobi_nini | b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3 | [
"Unlicense"
] | 1 | 2020-09-30T08:03:10.000Z | 2020-09-30T08:03:10.000Z | build_word2vec_model_v020.py | espoirMur/balobi_nini | b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3 | [
"Unlicense"
] | 22 | 2020-09-23T14:05:33.000Z | 2021-12-04T22:40:41.000Z | build_word2vec_model_v020.py | espoirMur/balobi_nini | b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3 | [
"Unlicense"
] | 1 | 2021-07-29T10:38:13.000Z | 2021-07-29T10:38:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: my-word-to-vec.py
# Author: #cf
# Version 0.2.0 (2016-10-08)
from pathlib import Path
"""
Function to build word2vec models from text files using gensim.
https://rare-technologies.com/word2vec-tutorial/
"""
##################
# Parameters
##################
WorkDir = Path.cwd()
TextDir = WorkDir.joinpath("data", "frwiki")
ModelFile = WorkDir.joinpath("models", "frwiki.gensim")
Size = 500 # dimensions of the model
##################
# Imports
##################
import os
import re
import gensim
import logging
print("gensim", gensim.__version__)
##################
# Functions
##################
def extract_sentences(TextPath):
"""
Turns a collection of plain text files into a list of lists of word tokens.
"""
print("--extract_sentences")
Sentences = []
    for File in os.listdir(TextPath):
        with open(os.path.join(TextPath, File), "r") as InFile:
            Text = InFile.read()
            Text = re.sub("\n", " ", Text)
            Text = re.sub("--", "", Text)
            Text = re.sub(r"\.\.\.", ".", Text)
            Text = Text.lower()
            SentencesOne = []
            Text = re.split("[.!?]", Text)
            for Sent in Text:
                Sent = re.split(r"\W", Sent)
                Sent = [Token for Token in Sent if Token]
                SentencesOne.append(Sent)
Sentences.extend(SentencesOne)
return Sentences
def build_model(TextDir, ModelFile):
"""
Builds a word vector model of the text files given as input.
This should be used for very large collections of text, as it is very memory-friendly.
"""
print("--build_model_new")
class MySentences(object):
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
for Para in open(os.path.join(self.dirname, fname)):
if "<doc id" not in Para and "</doc>" not in Para:
Sentences = re.split("[.!?]", Para)
for Sent in Sentences:
Sent = re.split("\W", Sent)
Sent = [Token.lower() for Token in Sent if Token]
Sent = [Token for Token in Sent if len(Token) > 2]
if len(Sent) > 1:
#print(Sent)
yield Sent
Sentences = MySentences(TextDir) # a memory-friendly iterator
Model = gensim.models.Word2Vec(Sentences, min_count=10, size=Size, workers=2)
Model.save(ModelFile)
################
# Main function
################
def main(TextDir, Size, ModelFile):
print("Launched.")
logging.basicConfig(filename="logging.txt", level=logging.INFO)
build_model(TextDir, ModelFile)
print("Done.")
main(TextDir, Size, ModelFile)
| 27.790476 | 90 | 0.537012 |
4a26d0fcaeaa80b801a429881bc43214b194642e | 2,555 | py | Python | setup.py | MattTunny/SeleniumBase | d85071074137b1d6070ec3d73122a0de966c844b | [
"MIT"
] | null | null | null | setup.py | MattTunny/SeleniumBase | d85071074137b1d6070ec3d73122a0de966c844b | [
"MIT"
] | null | null | null | setup.py | MattTunny/SeleniumBase | d85071074137b1d6070ec3d73122a0de966c844b | [
"MIT"
] | null | null | null | """
The setup package to install SeleniumBase dependencies and plugins
(Uses selenium 3.x and is compatible with Python 2.7+ and Python 3.6+)
"""
from setuptools import setup, find_packages # noqa
setup(
name='seleniumbase',
version='1.12.3',
description='Web Automation & Testing Framework - http://seleniumbase.com',
long_description='Web Automation and Testing Framework - seleniumbase.com',
platforms='Mac * Windows * Linux * Docker',
url='http://seleniumbase.com',
author='Michael Mintz',
author_email='[email protected]',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'pip',
'setuptools',
'ipython==5.6.0',
'selenium==3.11.0',
'nose==1.3.7',
'pytest==3.6.3',
'pytest-html==1.19.0',
'pytest-xdist==1.22.2',
'six==1.11.0',
'flake8==3.5.0',
'requests==2.19.1',
'beautifulsoup4==4.6.0',
'unittest2==1.1.0',
'chardet==3.0.4',
'boto==2.48.0',
'ipdb==0.11',
'PyVirtualDisplay==0.2.1',
],
packages=['seleniumbase',
'seleniumbase.common',
'seleniumbase.config',
'seleniumbase.core',
'seleniumbase.fixtures',
'seleniumbase.masterqa',
'seleniumbase.plugins',
'console_scripts',
'integrations',
'integrations.selenium_grid',
'integrations.selenium_ide',
],
entry_points={
'console_scripts': [
'seleniumbase = console_scripts.run:main',
],
'nose.plugins': [
'base_plugin = seleniumbase.plugins.base_plugin:Base',
'selenium = seleniumbase.plugins.selenium_plugin:SeleniumBrowser',
'page_source = seleniumbase.plugins.page_source:PageSource',
'screen_shots = seleniumbase.plugins.screen_shots:ScreenShots',
'test_info = seleniumbase.plugins.basic_test_info:BasicTestInfo',
('db_reporting = '
'seleniumbase.plugins.db_reporting_plugin:DBReporting'),
's3_logging = seleniumbase.plugins.s3_logging_plugin:S3Logging',
('hipchat_reporting = seleniumbase.plugins'
'.hipchat_reporting_plugin:HipchatReporting'),
],
'pytest11': ['seleniumbase = seleniumbase.plugins.pytest_plugin']
}
)
# print(os.system("cat seleniumbase.egg-info/PKG-INFO"))
print("\n*** SeleniumBase Installation Complete! ***\n")
| 35.486111 | 79 | 0.590607 |
4a26d13b953bbc6600e94ed9330b38d1a84174ab | 4,612 | py | Python | PythonAPI/carissma_project/lib/python3.5/site-packages/mpl_toolkits/mplot3d/proj3d.py | AbdulHoffmann/carla_carissma | 8d382769ffa02a6c61a22c57160285505f5ff0a4 | [
"MIT"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | venv/lib/python3.7/site-packages/mpl_toolkits/mplot3d/proj3d.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | venv/lib/python3.7/site-packages/mpl_toolkits/mplot3d/proj3d.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | # 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
"""
Return 2D equation of line in the form ax+by+c = 0
"""
# x + x1 = 0
x0, y0 = p0[:2]
x1, y1 = p1[:2]
#
if x0 == x1:
a = -1
b = 0
c = x1
elif y0 == y1:
a = 0
b = 1
c = -y1
else:
a = (y0-y1)
        b = (x1-x0)
c = (x0*y1 - x1*y0)
return a, b, c
def line2d_dist(l, p):
"""
Distance from line to point
line is a tuple of coefficients a,b,c
"""
a, b, c = l
x0, y0 = p
return abs((a*x0 + b*y0 + c)/np.sqrt(a**2+b**2))
def line2d_seg_dist(p1, p2, p0):
"""distance(s) from line defined by p1 - p2 to point(s) p0
p0[0] = x(s)
p0[1] = y(s)
intersection point p = p1 + u*(p2-p1)
and intersection point lies within segment if u is between 0 and 1
"""
x21 = p2[0] - p1[0]
y21 = p2[1] - p1[1]
x01 = np.asarray(p0[0]) - p1[0]
y01 = np.asarray(p0[1]) - p1[1]
u = (x01*x21 + y01*y21) / (x21**2 + y21**2)
u = np.clip(u, 0, 1)
d = np.sqrt((x01 - u*x21)**2 + (y01 - u*y21)**2)
return d
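# A worked example (illustrative values): for the segment from (0, 0) to (10, 0),
# the point (5, 3) projects inside the segment (u = 0.5) at distance 3, while
# (15, 0) is clipped to the far endpoint (u = 1) at distance 5:
#
#     line2d_seg_dist((0, 0), (10, 0), (5, 3))    # 3.0
#     line2d_seg_dist((0, 0), (10, 0), (15, 0))   # 5.0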
def mod(v):
"""3d vector length"""
return np.sqrt(v[0]**2+v[1]**2+v[2]**2)
def world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax):
dx, dy, dz = (xmax-xmin), (ymax-ymin), (zmax-zmin)
return np.array([
[1.0/dx,0,0,-xmin/dx],
[0,1.0/dy,0,-ymin/dy],
[0,0,1.0/dz,-zmin/dz],
[0,0,0,1.0]])
def view_transformation(E, R, V):
n = (E - R)
## new
# n /= mod(n)
# u = np.cross(V,n)
# u /= mod(u)
# v = np.cross(n,u)
# Mr = np.diag([1.]*4)
# Mt = np.diag([1.]*4)
# Mr[:3,:3] = u,v,n
# Mt[:3,-1] = -E
## end new
## old
n = n / mod(n)
u = np.cross(V, n)
u = u / mod(u)
v = np.cross(n, u)
Mr = [[u[0],u[1],u[2],0],
[v[0],v[1],v[2],0],
[n[0],n[1],n[2],0],
[0, 0, 0, 1],
]
#
Mt = [[1, 0, 0, -E[0]],
[0, 1, 0, -E[1]],
[0, 0, 1, -E[2]],
[0, 0, 0, 1]]
## end old
return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0,-1,0]
])
def ortho_transformation(zfront, zback):
# note: w component in the resulting vector will be (zback-zfront), not 1
a = -(zfront + zback)
b = -(zfront - zback)
return np.array([[2,0,0,0],
[0,2,0,0],
[0,0,-2,0],
[0,0,a,b]
])
def proj_transform_vec(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here..
txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
return txs, tys, tzs
def proj_transform_vec_clip(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here.
txs, tys, tzs = vecw[0] / w, vecw[1] / w, vecw[2] / w
tis = (0 <= vecw[0]) & (vecw[0] <= 1) & (0 <= vecw[1]) & (vecw[1] <= 1)
if np.any(tis):
tis = vecw[1] < 1
return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
iM = linalg.inv(M)
vec = vec_pad_ones(xs, ys, zs)
vecr = np.dot(iM, vec)
try:
vecr = vecr/vecr[3]
except OverflowError:
pass
return vecr[0], vecr[1], vecr[2]
def vec_pad_ones(xs, ys, zs):
return np.array([xs, ys, zs, np.ones_like(xs)])
def proj_transform(xs, ys, zs, M):
"""
Transform the points by the projection matrix
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec(vec, M)
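# A minimal sketch of how these transforms are usually composed (the eye, reference
# and up vectors are arbitrary illustrative values):
#
#     M_world = world_transformation(0, 1, 0, 1, 0, 1)
#     M_view = view_transformation(np.array([3.0, 3.0, 3.0]),   # eye E
#                                  np.array([0.0, 0.0, 0.0]),   # reference R
#                                  np.array([0.0, 0.0, 1.0]))   # up V
#     M = np.dot(persp_transformation(10, -10), np.dot(M_view, M_world))
#     txs, tys, tzs = proj_transform([0.5], [0.5], [0.5], M)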
def proj_transform_clip(xs, ys, zs, M):
"""
Transform the points by the projection matrix
and return the clipping result
returns txs,tys,tzs,tis
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec_clip(vec, M)
transform = proj_transform
def proj_points(points, M):
return np.column_stack(proj_trans_points(points, M))
def proj_trans_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform_clip(xs, ys, zs, M)
def rot_x(V, alpha):
cosa, sina = np.cos(alpha), np.sin(alpha)
M1 = np.array([[1,0,0,0],
[0,cosa,-sina,0],
[0,sina,cosa,0],
[0,0,0,1]])
return np.dot(M1, V)
| 23.292929 | 77 | 0.48634 |
4a26d24bd9839edbd3218a3722f24aa43333327e | 16,518 | py | Python | hityper/__main__.py | JohnnyPeng18/HiTyper | 1e80e85e204dbaca522a0c06309479e29ce5d94c | [
"Apache-2.0"
] | 2 | 2022-02-05T08:46:09.000Z | 2022-02-06T06:03:16.000Z | hityper/__main__.py | JohnnyPeng18/HiTyper | 1e80e85e204dbaca522a0c06309479e29ce5d94c | [
"Apache-2.0"
] | 2 | 2022-03-24T12:36:45.000Z | 2022-03-30T07:34:18.000Z | hityper/__main__.py | JohnnyPeng18/HiTyper | 1e80e85e204dbaca522a0c06309479e29ce5d94c | [
"Apache-2.0"
] | null | null | null | import argparse
import subprocess
import ast
import os
import json
import traceback
from hityper.tdg_generator import TDGGenerator
from hityper.usertype_finder import UsertypeFinder
from hityper.utils import formatUserTypes, getRecommendations, test_multiplefile
from hityper.config import config
from hityper import logger
from hityper.utils import detectChange, SimModel
import logging
from tqdm import tqdm
logger.name = __name__
def setuplogs(repo):
fh = logging.FileHandler(repo + '/hityper.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s[%(levelname)s][%(filename)s:%(lineno)d] %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def findusertype(args):
if args.repo:
outputrepo = args.output_directory if args.output_directory else "."
setuplogs(outputrepo)
if args.source:
try:
source = open(args.source, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(args.source, args.repo, args.validate)
usertypes, _ = usertypefinder.run(root)
with open(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_USERTYPES.json"), "w", encoding = "utf-8") as of:
of.write(json.dumps(usertypes, sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved results to {}".format(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_USERTYPES.json")))
except Exception as e:
traceback.print_exc()
logger.error("Failed to find the user-defined types in file {}, reason: {}".format(args.source, e))
else:
files = bytes.decode(subprocess.check_output(["find", args.repo, "-name", "*.py"])).split("\n")
results = {}
for f in files:
if os.path.isfile(f):
try:
source = open(f, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(f, args.repo, args.validate)
usertypes, _ = usertypefinder.run(root)
results[f] = usertypes
except Exception as e:
traceback.print_exc()
logger.error("Failed to find the user-defined types in file {}, reason: {}".format(f, e))
with open(outputrepo + "/" + args.repo.replace("/", "_") + "_USERTYPES.json", "w", encoding = "utf-8") as of:
of.write(json.dumps(results, sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved results to {}".format(outputrepo + "/" + args.repo.replace("/", "_") + "_USERTYPES.json"))
def gentdg(args):
if args.repo:
if not os.path.isdir(args.repo):
logger.error("Cannot find directory {}".format(args.repo))
return
outputrepo = args.output_directory if args.output_directory else "."
setuplogs(outputrepo)
if args.source:
if not os.path.isfile(args.source):
logger.error("Cannot find source file {}".format(args.source))
try:
source = open(args.source, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(args.source, args.repo, True)
usertypes, _ = usertypefinder.run(root)
generator = TDGGenerator(args.source, args.optimize, args.location, usertypes, alias = 1 if args.alias_analysis else 0, repo = args.repo if args.call_analysis else None)
global_tg = generator.run(root)
if args.output_format == "json":
with open(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_TDG.json"), "w", encoding = "utf-8") as of:
of.write(json.dumps(global_tg.dump(), sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved TDGs to {}".format(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_TDG.json")))
else:
for tg in global_tg.tgs:
tg.draw(filerepo = outputrepo)
global_tg.draw(filerepo = outputrepo)
logger.info("Saved TDGs to {}".format(outputrepo))
except Exception as e:
traceback.print_exc()
logger.error("Failed to generate TDG for file {}, reason: {}".format(args.source, e))
if not args.source:
files = bytes.decode(subprocess.check_output(["find", args.repo, "-name", "*.py"])).split("\n")
for f in tqdm(files):
if os.path.isfile(f):
try:
source = open(f, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(f, args.repo, True)
usertypes, _ = usertypefinder.run(root)
generator = TDGGenerator(f, args.optimize, args.location, usertypes, alias = 1 if args.alias_analysis else 0, repo = args.repo if args.call_analysis else None)
global_tg = generator.run(root)
                except Exception as e:
                    traceback.print_exc()
                    logger.error("Failed to generate TDG for file {}, reason: {}".format(f, e))
                    # global_tg is undefined (or stale from a previous file) when this
                    # file failed, so skip writing output for it.
                    continue
if args.output_format == "json":
with open(outputrepo + "/" + f.replace("/", "_").replace(".py", "_TDG.json"), "w", encoding = "utf-8") as of:
of.write(json.dumps(global_tg.dump(), sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved TDGs to {}".format(outputrepo + "/" + f.replace("/", "_").replace(".py", "_TDG.json")))
else:
for tg in global_tg.tgs:
tg.draw(filerepo = outputrepo)
global_tg.draw(filerepo = outputrepo)
logger.info("Saved TDGs to {}".format(outputrepo))
def infertypes(args):
if args.repo:
if not os.path.isdir(args.repo):
logger.error("Cannot find directory {}".format(args.repo))
return
outputrepo = args.output_directory if args.output_directory else "."
setuplogs(outputrepo)
if args.recommendations and os.path.isfile(args.recommendations):
with open(args.recommendations, "r", encoding = "utf-8") as mf:
recommendations = json.loads(mf.read())
else:
recommendations = None
if config["simmodel"] != None:
simmodel = SimModel(config[config["simmodel"]], config["tokenizer"])
else:
simmodel = None
if args.source:
if not os.path.isfile(args.source):
logger.error("Cannot find source file {}".format(args.source))
try:
source = open(args.source, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(args.source, args.repo, True)
usertypes, _ = usertypefinder.run(root)
generator = TDGGenerator(args.source, True, args.location, usertypes, alias = 0, repo = None)
global_tg = generator.run(root)
str_results = {}
global_tg.passTypes(debug = False)
str_results["global@global"] = global_tg.dumptypes()
if recommendations == None and args.type4py:
recommendations = getRecommendations(source)
            elif isinstance(recommendations, dict) and args.source in recommendations:
                recommendations = recommendations[args.source]
for tg in global_tg.tgs:
if recommendations != None:
changed = True
iters = 0
while changed and iters < config["max_recommendation_iteration"]:
iters += 1
tg.passTypes(debug = False)
types = tg.findHotTypes()
tg.recommendType(types, recommendations, formatUserTypes(usertypes), usertypes["module"], args.topn, simmodel = simmodel)
tg.passTypes(debug = False)
new_types = tg.findHotTypes()
changed = detectChange(types, new_types)
tg.simplifyTypes()
else:
tg.passTypes(debug = False)
tg.simplifyTypes()
str_results[tg.name] = tg.dumptypes()
with open(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_INFERREDTYPES.json"), "w", encoding = "utf-8") as of:
of.write(json.dumps(str_results, sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved results to {}".format(outputrepo + "/" + args.source.replace("/", "_").replace(".py", "_INFERREDTYPES.json")))
except Exception as e:
traceback.print_exc()
logger.error("Type inference failed for file {}, reason: {}".format(args.source, e))
if not args.source:
files = bytes.decode(subprocess.check_output(["find", args.repo, "-name", "*.py"])).split("\n")
results = {}
for f in tqdm(files):
if os.path.isfile(f):
try:
source = open(f, "r", encoding = "utf-8").read()
root = ast.parse(source)
usertypefinder = UsertypeFinder(f, args.repo, True)
usertypes, _ = usertypefinder.run(root)
generator = TDGGenerator(f, True, args.location, usertypes, alias = 0, repo = None)
global_tg = generator.run(root)
str_results = {}
global_tg.passTypes(debug = False)
str_results["global@global"] = global_tg.dumptypes()
if recommendations == None and args.type4py:
recommendations = getRecommendations(source)
elif isinstance(recommendations, dict) and f in recommendations:
recommendations = recommendations[f]
for tg in global_tg.tgs:
if recommendations != None:
changed = True
iters = 0
while changed and iters < config["max_recommendation_iteration"]:
iters += 1
tg.passTypes(debug = False)
types = tg.findHotTypes()
tg.recommendType(types, recommendations, formatUserTypes(usertypes), usertypes["module"], args.topn, simmodel = simmodel)
tg.passTypes(debug = False)
new_types = tg.findHotTypes()
changed = detectChange(types, new_types)
tg.simplifyTypes()
else:
tg.passTypes(debug = False)
tg.simplifyTypes()
str_results[tg.name] = tg.dumptypes()
results[f] = str_results
except Exception as e:
traceback.print_exc()
logger.error("Type inference failed for file {}, reason: {}".format(f, e))
with open(outputrepo + "/" + args.repo.replace("/", "_") + "_INFERREDTYPES.json", "w", encoding = "utf-8") as of:
of.write(json.dumps(results, sort_keys=True, indent=4, separators=(',', ': ')))
logger.info("Saved results to {}".format(outputrepo + "/" + args.repo.replace("/", "_") + "_INFERREDTYPES.json"))
def evaluate(args):
setuplogs(".")
test_multiplefile(args.groundtruth, args.classified_groundtruth, args.usertype, recfile = args.recommendations if args.recommendations else None, recmodel = args.type4py, topn = args.topn)
def main():
arg_parser = argparse.ArgumentParser()
sub_parsers = arg_parser.add_subparsers(dest='cmd')
usertype_parser = sub_parsers.add_parser('findusertype')
usertype_parser.add_argument('-s', '--source', required = False, type=str, help = "Path to a Python source file")
usertype_parser.add_argument('-p', '--repo', required = True, type=str, help = "Path to a Python project")
usertype_parser.add_argument("-v", "--validate", default = True, action="store_true", help = "Validate the imported user-defined types by finding their implementations")
usertype_parser.add_argument('-d', "--output_directory", required = False, type=str, help = "Path to the store the usertypes")
usertype_parser.set_defaults(func = findusertype)
tdg_parser = sub_parsers.add_parser('gentdg')
tdg_parser.add_argument('-s', '--source', required = False, type=str, help = "Path to a Python source file")
tdg_parser.add_argument('-p', '--repo', required = True, type=str, help = "Path to a Python project")
tdg_parser.add_argument('-o', '--optimize', default = False, action="store_true", help = "Remove redundant nodes in TDG")
tdg_parser.add_argument('-l', '--location', required = False, type=str, help = "Generate TDG for a specific function")
tdg_parser.add_argument('-a', '--alias_analysis', default = False, action="store_true", help = "Generate alias graphs along with TDG")
tdg_parser.add_argument('-c', '--call_analysis', default = False, action="store_true", help = "Generate call graphs along with TDG")
tdg_parser.add_argument('-d', "--output_directory", required = False, type=str, help = "Path to the generated TDGs")
tdg_parser.add_argument('-f', "--output_format", default = "json", choices=["json", "pdf"], type=str, help = "Formats of output TDGs")
tdg_parser.set_defaults(func = gentdg)
inference_parser = sub_parsers.add_parser('infer')
inference_parser.add_argument('-s', '--source', required = False, type=str, help = "Path to a Python source file")
inference_parser.add_argument('-p', '--repo', required = True, type=str, help = "Path to a Python project")
inference_parser.add_argument('-l', '--location', required = False, type=str, help = "Type inference for a specific function")
inference_parser.add_argument('-d', "--output_directory", required = False, type=str, help = "Path to the generated TDGs")
inference_parser.add_argument('-m', "--recommendations", required = False, type=str, help = "Path to the recommendations generated by a DL model")
inference_parser.add_argument('-t', "--type4py", default = False, action="store_true", help = "Use Type4Py as the recommendation model")
inference_parser.add_argument('-n', "--topn", default = 1, type = int, help = "Indicate the top n predictions from DL models used by HiTyper")
inference_parser.set_defaults(func = infertypes)
eval_parser = sub_parsers.add_parser('eval')
eval_parser.add_argument('-g', '--groundtruth', required = True, type=str, help = "Path to a ground truth dataset")
eval_parser.add_argument('-c', '--classified_groundtruth', required = True, type=str, help = "Path to a classified ground truth dataset")
eval_parser.add_argument('-u', '--usertype', required = True, type=str, help = "Path to a previously collected user-defined type set")
eval_parser.add_argument('-m', "--recommendations", required = False, type=str, help = "Path to the recommendations generated by a DL model")
eval_parser.add_argument('-t', "--type4py", default = False, action="store_true", help = "Use Type4Py as the recommendation model")
eval_parser.add_argument('-n', "--topn", default = 1, type = int, help = "Indicate the top n predictions from DL models used by HiTyper")
eval_parser.set_defaults(func = evaluate)
args = arg_parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
| 58.367491 | 192 | 0.568047 |
4a26d27f52a034694e96532fa1c97b8c9fe862a1 | 16,442 | py | Python | nikola/plugins/compile/rest/__init__.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | null | null | null | nikola/plugins/compile/rest/__init__.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | null | null | null | nikola/plugins/compile/rest/__init__.py | asmeurer/nikola | ea1c651bfed0fd6337f1d22cf8dd99899722912c | [
"MIT"
] | 1 | 2021-07-07T11:32:42.000Z | 2021-07-07T11:32:42.000Z | # -*- coding: utf-8 -*-
# Copyright © 2012-2020 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""reStructuredText compiler for Nikola."""
import io
import logging
import os
import docutils.core
import docutils.nodes
import docutils.transforms
import docutils.utils
import docutils.io
import docutils.readers.standalone
import docutils.writers.html5_polyglot
import docutils.parsers.rst.directives
from docutils.parsers.rst import roles
from nikola.nikola import LEGAL_VALUES
from nikola.metadata_extractors import MetaCondition
from nikola.plugin_categories import PageCompiler
from nikola.utils import (
makedirs,
write_metadata,
LocaleBorg,
map_metadata
)
class CompileRest(PageCompiler):
"""Compile reStructuredText into HTML."""
name = "rest"
friendly_name = "reStructuredText"
demote_headers = True
logger = None
supports_metadata = True
metadata_conditions = [(MetaCondition.config_bool, "USE_REST_DOCINFO_METADATA")]
def read_metadata(self, post, lang=None):
"""Read the metadata from a post, and return a metadata dict."""
if lang is None:
lang = LocaleBorg().current_lang
source_path = post.translated_source_path(lang)
# Silence reST errors, some of which are due to a different
# environment. Real issues will be reported while compiling.
null_logger = logging.getLogger('NULL')
null_logger.setLevel(1000)
with io.open(source_path, 'r', encoding='utf-8') as inf:
data = inf.read()
_, _, _, document = rst2html(data, logger=null_logger, source_path=source_path, transforms=self.site.rst_transforms)
meta = {}
if 'title' in document:
meta['title'] = document['title']
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
meta[name] = value
# Put 'authors' meta field contents in 'author', too
if 'authors' in meta and 'author' not in meta:
meta['author'] = '; '.join(meta['authors'])
# Map metadata from other platforms to names Nikola expects (Issue #2817)
map_metadata(meta, 'rest_docinfo', self.site.config)
return meta
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile reST into HTML strings."""
# If errors occur, this will be added to the line number reported by
# docutils so the line number matches the actual line number (off by
# 7 with default metadata, could be more or less depending on the post).
add_ln = 0
if not is_two_file:
m_data, data = self.split_metadata(data, post, lang)
add_ln = len(m_data.splitlines()) + 1
default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')
settings_overrides = {
'initial_header_level': 1,
'record_dependencies': True,
'stylesheet_path': None,
'link_stylesheet': True,
'syntax_highlight': 'short',
# This path is not used by Nikola, but we need something to silence
# warnings about it from reST.
'math_output': 'mathjax /assets/js/mathjax.js',
'template': default_template_path,
'language_code': LEGAL_VALUES['DOCUTILS_LOCALES'].get(LocaleBorg().current_lang, 'en'),
'doctitle_xform': self.site.config.get('USE_REST_DOCINFO_METADATA'),
'file_insertion_enabled': self.site.config.get('REST_FILE_INSERTION_ENABLED'),
}
from nikola import shortcodes as sc
new_data, shortcodes = sc.extract_shortcodes(data)
if self.site.config.get('HIDE_REST_DOCINFO', False):
self.site.rst_transforms.append(RemoveDocinfo)
output, error_level, deps, _ = rst2html(
new_data, settings_overrides=settings_overrides, logger=self.logger, source_path=source_path, l_add_ln=add_ln, transforms=self.site.rst_transforms)
if not isinstance(output, str):
# To prevent some weird bugs here or there.
# Original issue: empty files. `output` became a bytestring.
output = output.decode('utf-8')
output, shortcode_deps = self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
return output, error_level, deps, shortcode_deps
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
error_level = 100
with io.open(dest, "w+", encoding="utf8") as out_file:
with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
output, error_level, deps, shortcode_deps = self.compile_string(data, source, is_two_file, post, lang)
out_file.write(output)
if post is None:
if deps.list:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += deps.list
post._depfile[dest] += shortcode_deps
if error_level < 3:
return True
else:
return False
def create_post(self, path, **kw):
"""Create a new post."""
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
# is_page is not used by create_post as of now.
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self))
fd.write(content)
def set_site(self, site):
"""Set Nikola site."""
super().set_site(site)
self.config_dependencies = []
for plugin_info in self.get_compiler_extensions():
self.config_dependencies.append(plugin_info.name)
plugin_info.plugin_object.short_help = plugin_info.description
if not site.debug:
self.logger.level = logging.WARNING
def get_observer(settings):
"""Return an observer for the docutils Reporter."""
def observer(msg):
"""Report docutils/rest messages to a Nikola user.
Error code mapping:
+----------+----------+
| docutils | logging |
+----------+----------+
| DEBUG | DEBUG |
| INFO | INFO |
| WARNING | WARNING |
| ERROR | ERROR |
| SEVERE | CRITICAL |
+----------+----------+
"""
errormap = {
docutils.utils.Reporter.DEBUG_LEVEL: logging.DEBUG,
docutils.utils.Reporter.INFO_LEVEL: logging.INFO,
docutils.utils.Reporter.WARNING_LEVEL: logging.WARNING,
docutils.utils.Reporter.ERROR_LEVEL: logging.ERROR,
docutils.utils.Reporter.SEVERE_LEVEL: logging.CRITICAL
}
text = docutils.nodes.Element.astext(msg)
line = msg['line'] + settings['add_ln'] if 'line' in msg else ''
out = '[{source}{colon}{line}] {text}'.format(
source=settings['source'], colon=(':' if line else ''),
line=line, text=text)
settings['logger'].log(errormap[msg['level']], out)
return observer
class NikolaReader(docutils.readers.standalone.Reader):
"""Nikola-specific docutils reader."""
config_section = 'nikola'
def __init__(self, *args, **kwargs):
"""Initialize the reader."""
self.transforms = kwargs.pop('transforms', [])
self.logging_settings = kwargs.pop('nikola_logging_settings', {})
docutils.readers.standalone.Reader.__init__(self, *args, **kwargs)
def get_transforms(self):
"""Get docutils transforms."""
return docutils.readers.standalone.Reader(self).get_transforms() + self.transforms
def new_document(self):
"""Create and return a new empty document tree (root node)."""
document = docutils.utils.new_document(self.source.source_path, self.settings)
document.reporter.stream = False
document.reporter.attach_observer(get_observer(self.logging_settings))
return document
def shortcode_role(name, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Return a shortcode role that passes through raw inline HTML."""
return [docutils.nodes.raw('', text, format='html')], []
roles.register_canonical_role('raw-html', shortcode_role)
roles.register_canonical_role('html', shortcode_role)
roles.register_canonical_role('sc', shortcode_role)
def add_node(node, visit_function=None, depart_function=None):
"""Register a Docutils node class.
    This function is completely optional. It is the same concept as the
    `Sphinx add_node function <http://sphinx-doc.org/extdev/appapi.html#sphinx.application.Sphinx.add_node>`_.
For example::
class Plugin(RestExtension):
name = "rest_math"
def set_site(self, site):
self.site = site
directives.register_directive('math', MathDirective)
add_node(MathBlock, visit_Math, depart_Math)
return super().set_site(site)
class MathDirective(Directive):
def run(self):
node = MathBlock()
return [node]
class Math(docutils.nodes.Element): pass
def visit_Math(self, node):
self.body.append(self.starttag(node, 'math'))
def depart_Math(self, node):
self.body.append('</math>')
    For a full example, you can refer to the `Microdata plugin <https://plugins.getnikola.com/#microdata>`_
"""
docutils.nodes._add_node_class_names([node.__name__])
if visit_function:
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'visit_' + node.__name__, visit_function)
if depart_function:
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'depart_' + node.__name__, depart_function)
# Output <code> for ``double backticks``. (Code and extra logic based on html4css1 translator)
def visit_literal(self, node):
"""Output <code> for double backticks."""
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'code', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
            self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</code>')
# Content already processed:
raise docutils.nodes.SkipNode
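# For instance (illustrative output; the exact markup depends on docutils settings),
# the reST snippet ``--an-option`` is rendered by the translator above roughly as
#     <code class="docutils literal"><span class="pre">--an-option</span></code>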
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'visit_literal', visit_literal)
def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
destination_path=None, reader=None,
parser=None, parser_name='restructuredtext', writer=None,
writer_name='html5_polyglot', settings=None, settings_spec=None,
settings_overrides=None, config_section='nikola',
enable_exit_status=None, logger=None, l_add_ln=0, transforms=None):
"""Set up & run a ``Publisher``, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
For a description of the parameters, see `publish_programmatically`.
WARNING: `reader` should be None (or NikolaReader()) if you want Nikola to report
reStructuredText syntax errors.
"""
if reader is None:
# For our custom logging, we have special needs and special settings we
# specify here.
# logger a logger from Nikola
# source source filename (docutils gets a string)
# add_ln amount of metadata lines (see comment in CompileRest.compile above)
reader = NikolaReader(transforms=transforms,
nikola_logging_settings={
'logger': logger, 'source': source_path,
'add_ln': l_add_ln
})
pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=docutils.io.StringOutput)
pub.set_components(None, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, None)
pub.settings._nikola_source_path = source_path
pub.set_destination(None, destination_path)
pub.publish(enable_exit_status=enable_exit_status)
return pub.writer.parts['docinfo'] + pub.writer.parts['fragment'], pub.document.reporter.max_level, pub.settings.record_dependencies, pub.document
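# A minimal usage sketch, assuming a plain Python logger and no extra transforms
# (i.e. not tied to a running Nikola site):
#
#     import logging
#     html, max_level, deps, doc = rst2html(
#         "Hello *world*!", source_path="example.rst",
#         logger=logging.getLogger("rest-demo"), transforms=[])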
# Alignment helpers for extensions
_align_options_base = ('left', 'center', 'right')
def _align_choice(argument):
return docutils.parsers.rst.directives.choice(argument, _align_options_base + ("none", ""))
class RemoveDocinfo(docutils.transforms.Transform):
"""Remove docinfo nodes."""
default_priority = 870
def apply(self):
"""Remove docinfo nodes."""
for node in self.document.traverse(docutils.nodes.docinfo):
node.parent.remove(node)
| 40.69802 | 159 | 0.637392 |
4a26d2a3d733de0106a6f42ca5f91b1e7d57a812 | 17 | py | Python | hdltools/__init__.py | brunosmmm/hdltools | a98ca8c4d168740fa229c939a7b1f31ea73eec24 | [
"MIT"
] | 2 | 2020-02-28T13:02:39.000Z | 2021-06-30T09:15:35.000Z | hdltools/__init__.py | brunosmmm/hdltools | a98ca8c4d168740fa229c939a7b1f31ea73eec24 | [
"MIT"
] | 1 | 2020-03-22T17:32:45.000Z | 2020-03-23T15:43:39.000Z | hdltools/__init__.py | brunosmmm/hdltools | a98ca8c4d168740fa229c939a7b1f31ea73eec24 | [
"MIT"
] | null | null | null | """HDL Tools."""
| 8.5 | 16 | 0.470588 |
4a26d2a5577efd7afb31f4525dcbd8683d74a5c1 | 7,755 | py | Python | caseworker/core/services.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 1 | 2021-10-16T16:36:58.000Z | 2021-10-16T16:36:58.000Z | caseworker/core/services.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 45 | 2020-08-11T14:37:46.000Z | 2022-03-29T17:03:02.000Z | caseworker/core/services.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | 3 | 2021-02-01T06:26:19.000Z | 2022-02-21T23:02:46.000Z | from collections import defaultdict
from caseworker.users.services import get_gov_user
from core import client
from core.helpers import convert_value_to_query_param
from caseworker.cases.constants import CaseType, CaseStatusEnum
from lite_forms.components import Option
def get_denial_reasons(request, convert_to_options=False, group=False):
data = client.get(request, "/static/denial-reasons/").json()["denial_reasons"]
if convert_to_options:
options = [Option(denial_reason["id"], denial_reason["id"]) for denial_reason in data]
if group:
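            # Group options by the first character of the denial-reason id, so that
            # e.g. ids "1a", "1b" and "1c" would all fall under "1" (the ids shown
            # here are illustrative only).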
return_dict = defaultdict(list)
for item in options:
return_dict[item.key[0]].append(item)
return dict(return_dict)
return options
return data
def get_countries(request, convert_to_options=False, exclude: list = None):
"""
    Returns a list of GOV.UK countries and territories.
    :param exclude: Takes a list of country codes and excludes them.
"""
data = client.get(request, "/static/countries/?" + convert_value_to_query_param("exclude", exclude))
if convert_to_options:
converted_units = []
for country in data.json().get("countries"):
converted_units.append(Option(country.get("id"), country.get("name")))
return converted_units
return data.json(), data.status_code
# CaseStatuesEnum
def get_statuses(request, convert_to_options=False):
""" Get static list of case statuses. """
data = client.get(request, "/static/statuses/")
if convert_to_options:
return [Option(key=item["id"], value=item["value"]) for item in data.json().get("statuses")]
return data.json()["statuses"], data.status_code
def get_permissible_statuses(request, case):
""" Get a list of case statuses permissible for the user's role. """
user, _ = get_gov_user(request, str(request.session["lite_api_user_id"]))
user_permissible_statuses = user["user"]["role"]["statuses"]
statuses, _ = get_statuses(request)
case_sub_type = case["case_type"]["sub_type"]["key"]
case_type = case["case_type"]["type"]["key"]
if case_type == CaseType.APPLICATION.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
not in [
CaseStatusEnum.APPLICANT_EDITING,
CaseStatusEnum.CLOSED,
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.CLC,
CaseStatusEnum.PV,
CaseStatusEnum.SURRENDERED,
]
]
elif case_type == CaseType.QUERY.value:
if case_sub_type == CaseType.END_USER_ADVISORY.value:
case_type_applicable_statuses = [
status for status in statuses if status["key"] in CaseStatusEnum.base_query_statuses()
]
else:
# if the query is not an end user advisory, then check if CLC/PV statuses are required
goods_query_status_keys = CaseStatusEnum.base_query_statuses().copy()
if case.data["clc_responded"] is not None:
goods_query_status_keys.insert(1, CaseStatusEnum.CLC)
if case.data["pv_grading_responded"] is not None:
# add PV status into the correct location
if case.data["clc_responded"] is not None:
goods_query_status_keys.insert(2, CaseStatusEnum.PV)
else:
goods_query_status_keys.insert(1, CaseStatusEnum.PV)
case_type_applicable_statuses = [status for status in statuses if status["key"] in goods_query_status_keys]
elif case_type == CaseType.COMPLIANCE.value:
if case_sub_type == CaseType.COMPLIANCE_SITE.value:
case_type_applicable_statuses = [
status for status in statuses if status["key"] in [CaseStatusEnum.OPEN, CaseStatusEnum.CLOSED,]
]
elif case_sub_type == CaseType.COMPLIANCE_VISIT.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
in [
CaseStatusEnum.OPEN,
CaseStatusEnum.UNDER_INTERNAL_REVIEW,
CaseStatusEnum.RETURN_TO_INSPECTOR,
CaseStatusEnum.AWAITING_EXPORTER_RESPONSE,
CaseStatusEnum.CLOSED,
]
]
elif case_type == CaseType.REGISTRATION.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
in [
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
CaseStatusEnum.REVOKED,
CaseStatusEnum.SUSPENDED,
CaseStatusEnum.SURRENDERED,
CaseStatusEnum.DEREGISTERED,
]
]
return [status for status in case_type_applicable_statuses if status in user_permissible_statuses]
def get_status_properties(request, status):
data = client.get(request, f"/static/statuses/properties/{status}")
return data.json(), data.status_code
# Permissions
def get_user_permissions(request, with_team=False):
user, _ = get_gov_user(request)
if with_team:
return user["user"]["role"]["permissions"], user["user"]["team"]
return user["user"]["role"]["permissions"]
# Control List Entries
def get_control_list_entries(request, convert_to_options=False, include_parent=False, clc_entries_cache=[]): # noqa
"""
Preliminary caching mechanism, requires service restart to repopulate control list entries
"""
if convert_to_options:
if clc_entries_cache:
return clc_entries_cache
else:
data = client.get(request, "/static/control-list-entries/")
for control_list_entry in data.json().get("control_list_entries"):
clc_entries_cache.append(
Option(
key=control_list_entry["rating"],
value=control_list_entry["rating"],
description=control_list_entry["text"],
)
)
return clc_entries_cache
if include_parent:
response = client.get(request, "/static/control-list-entries/?include_parent=True")
else:
response = client.get(request, "/static/control-list-entries/?group=True")
response.raise_for_status()
return response.json().get("control_list_entries")
def get_gov_pv_gradings(request, convert_to_options=False):
pv_gradings = client.get(request, "/static/private-venture-gradings/gov/").json().get("pv_gradings")
if convert_to_options:
converted_units = []
for pv_grading_entry in pv_gradings:
for key in pv_grading_entry:
converted_units.append(Option(key=key, value=pv_grading_entry[key]))
return converted_units
return pv_gradings
def get_pv_gradings(request, convert_to_options=False):
pv_gradings = client.get(request, "/static/private-venture-gradings/").json().get("pv_gradings")
if convert_to_options:
converted_units = []
for pv_grading_entry in pv_gradings:
for key in pv_grading_entry:
converted_units.append(Option(key=key, value=pv_grading_entry[key]))
return converted_units
return pv_gradings
def get_menu_notifications(request):
if not hasattr(request, "cached_get_menu_notifications"):
request.cached_get_menu_notifications = client.get(request, "/gov-users/notifications/")
response = request.cached_get_menu_notifications
return response.json()
| 37.283654 | 119 | 0.648485 |
4a26d2f94bca429c7f22505c371e0b6c3c9fc917 | 1,850 | py | Python | build/lib/skedm/version.py | NickC1/sknla | 4a33011537961d0aa08e6c4245684088a6f8f5c4 | [
"MIT"
] | 21 | 2017-03-04T00:55:57.000Z | 2021-06-20T00:33:06.000Z | build/lib/skedm/version.py | NickC1/sknla | 4a33011537961d0aa08e6c4245684088a6f8f5c4 | [
"MIT"
] | 3 | 2017-03-17T21:35:02.000Z | 2019-01-11T16:59:08.000Z | build/lib/skedm/version.py | NickC1/sknla | 4a33011537961d0aa08e6c4245684088a6f8f5c4 | [
"MIT"
] | 15 | 2017-06-06T08:17:08.000Z | 2021-11-08T23:15:30.000Z | from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 2
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
#_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "skedm: Emperical Dynamic Modeling with a simple api"
# Long description will go up on the pypi page
long_description = """
skedm
========
Scikit Empirical Dynamic Modeling (skedm) can be used to forecast time series,
spatio-temporal 2D arrays, and even discrete spatial arrangements. More
importantly, skedm can provide insight into the underlying dynamics of a system.
"""
NAME = "skedm"
MAINTAINER = "Nick Cortale"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "https://github.com/NickC1/skedm"
DOWNLOAD_URL = "https://github.com/NickC1/skedm/tarball/0.1"
LICENSE = "MIT"
AUTHOR = "Nick Cortale"
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['skedm']
PACKAGE_DATA = ""
REQUIRES = ["numpy", "scikitlearn"]
| 31.896552 | 87 | 0.712432 |
4a26d3411595c7bd2dfb4d9e5660b668e90fe5a1 | 21,151 | py | Python | examples.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | 15 | 2015-08-02T05:39:06.000Z | 2021-12-22T12:13:21.000Z | examples.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | null | null | null | examples.py | bvilhjal/mixmogam | 766b889d4f5e97f4c9a960e3a007b125137ba796 | [
"MIT"
] | 8 | 2017-02-16T07:35:59.000Z | 2022-02-11T19:56:19.000Z | """
Examples for how to perform GWAS using mixed models, and stepwise mixed models.
Author: Bjarni J. Vilhjalmsson
Email: [email protected]
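A quick way to try one of these examples from the repository root (assuming the
A. thaliana data files used below are available) is, for instance:
    import examples
    examples.linear_regression_gwas()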
"""
def load_a_thaliana_genotypes():
"""
Loads A. thaliana genotypes (Horton et al., 2012) and returns a snps_data object
"""
import dataParsers as dp
sd = dp.parse_snp_data('at_data/all_chromosomes_binary.csv')
return sd
def load_a_thaliana_phenotypes():
"""
Loads A. thaliana phenotypes (Atwell et al., 2010) and returns a phenotype_data
object containing 107 different phenotypes.
"""
import phenotypeData as pd
phend = pd.parse_phenotype_file('at_data/199_phenotypes.csv')
return phend
def linear_regression_gwas(phenotype_id=5, pvalue_file='lr_results.pvals',
manhattan_plot_file='lr_manhattan.png',
qq_plot_file_prefix='lr_qq'):
"""
Perform linear regression GWAS for flowering time (phenotype_id=5 in the phenotype file)
in plants grown under 10C conditions.
"""
import linear_models as lm
import gwaResults as gr
# Load genotypes
sd = load_a_thaliana_genotypes()
# Load phenotypes
phend = load_a_thaliana_phenotypes()
# Coordinate phenotype of interest and genotypes. This filters the genotypes and
# phenotypes, leaving only accessions (individuals) which overlap between both,
# and SNPs that are polymorphic in the resulting subset.
sd.coordinate_w_phenotype_data(phend, phenotype_id)
# Perform linear regression GWAS
lr_results = lm.linear_model(sd.get_snps(), phend.get_values(phenotype_id))
# Construct a results object
res = gr.Result(scores=lr_results['ps'], snps_data=sd)
# Save p-values to file
res.write_to_file(pvalue_file)
# Plot Manhattan plot
res.plot_manhattan(png_file=manhattan_plot_file, percentile=90, plot_bonferroni=True,
neg_log_transform=True)
# Plot a QQ-plot
res.plot_qq(qq_plot_file_prefix)
def mixed_model_gwas(phenotype_id=5, pvalue_file='mm_results.pvals',
manhattan_plot_file='mm_manhattan.png',
qq_plot_file_prefix='mm_qq'):
"""
Perform mixed model (EMMAX) GWAS for flowering time (phenotype_id=5 in the phenotype file)
in plants grown under 10C conditions.
"""
import linear_models as lm
import kinship
import gwaResults as gr
# Load genotypes
sd = load_a_thaliana_genotypes()
# Load phenotypes
phend = load_a_thaliana_phenotypes()
# Coordinate phenotype of interest and genotypes. This filters the genotypes and
# phenotypes, leaving only accessions (individuals) which overlap between both,
# and SNPs that are polymorphic in the resulting subset.
sd.coordinate_w_phenotype_data(phend, phenotype_id)
# Calculate kinship (IBS)
K = kinship.calc_ibs_kinship(sd.get_snps())
# Perform mixed model GWAS
mm_results = lm.emmax(sd.get_snps(), phend.get_values(phenotype_id), K)
# Construct a results object
res = gr.Result(scores=mm_results['ps'], snps_data=sd)
# Save p-values to file
res.write_to_file(pvalue_file)
# Plot Manhattan plot
res.plot_manhattan(png_file=manhattan_plot_file, percentile=90, plot_bonferroni=True,
neg_log_transform=True)
# Plot a QQ-plot
res.plot_qq(qq_plot_file_prefix)
def multiple_loci_mixed_model_gwas(phenotype_id=5, pvalue_file_prefix='mlmm_results',
result_files_prefix='mlmm_manhattan', max_num_steps=10, snp_priors=None):
"""
Perform multiple loci mixed model GWAS for flowering time (phenotype_id=5 in the phenotype file)
in plants grown under 10C conditions.
"""
import linear_models as lm
import kinship
# Load genotypes
sd = load_a_thaliana_genotypes()
# Load phenotypes
phend = load_a_thaliana_phenotypes()
# Coordinate phenotype of interest and genotypes. This filters the genotypes and
# phenotypes, leaving only accessions (individuals) which overlap between both,
# and SNPs that are polymorphic in the resulting subset.
sd.coordinate_w_phenotype_data(phend, phenotype_id)
# Calculate kinship (IBS)
K = kinship.calc_ibs_kinship(sd.get_snps())
# Perform multiple loci mixed model GWAS
mlmm_results = lm.mlmm(phend.get_values(phenotype_id), K, sd=sd,
num_steps=max_num_steps, file_prefix=result_files_prefix,
save_pvals=True, pval_file_prefix=result_files_prefix, snp_priors=snp_priors)
def perform_cegs_gwas(kinship_type='ibd', phen_type='medians'):
"""
Perform a simple MLM GWAS for the 8 traits
"""
import hdf5_data
import kinship
import linear_models as lm
import time
import scipy as sp
from matplotlib import pyplot as plt
import analyze_gwas_results as agr
phen_dict = hdf5_data.parse_cegs_drosophila_phenotypes()
phenotypes = ['Protein', 'Sugar', 'Triglyceride', 'weight']
envs = ['mated', 'virgin']
for phenotype in phenotypes:
for env in envs:
print phenotype, env
s1 = time.time()
d = hdf5_data.coordinate_cegs_genotype_phenotype(
phen_dict, phenotype, env)
print 'Calculating kinship'
if kinship_type == 'ibs':
K = kinship.calc_ibs_kinship(d['snps'])
elif kinship_type == 'ibd':
K = kinship.calc_ibd_kinship(d['snps'])
else:
raise NotImplementedError
if phen_type == 'means':
lmm = lm.LinearMixedModel(d['Y_means'])
elif phen_type == 'medians':
lmm = lm.LinearMixedModel(d['Y_medians'])
else:
raise NotImplementedError
lmm.add_random_effect(K)
print "Running EMMAX"
res = lmm.emmax_f_test(d['snps'], emma_num=1000)
print 'Mean p-value:', sp.mean(res['ps'])
secs = time.time() - s1
if secs > 60:
mins = int(secs) / 60
secs = secs - mins * 60
print 'Took %d mins and %f seconds.' % (mins, secs)
else:
print 'Took %f seconds.' % (secs)
# Now generating QQ-plots
label_str = '%s_%s_%s_%s' % (
kinship_type, phenotype, env, phen_type)
agr.plot_simple_qqplots_pvals('/Users/bjarnivilhjalmsson/data/tmp/cegs_qq_%s' % (label_str),
[res['ps']], result_labels=[
label_str], line_colors=['green'],
num_dots=1000, title=None, max_neg_log_val=6)
# Perform multiple loci mixed model GWAS
chromosomes = d['positions'][:, 0]
positions = sp.array(d['positions'][:, 1], 'int32')
x_positions = []
y_log_pvals = []
colors = []
x_shift = 0
for i, chrom in enumerate(sp.unique(chromosomes)):
if chrom in ['2L', '2LHet', '3L', '3LHet', '4', 'X', 'XHet']:
colors.append('c')
else: # chrom in ['2R', '2RHet', '3R', '3RHet', 'U', 'Uextra']
# Toss U and Hets
colors.append('m')
chrom_filter = sp.in1d(chromosomes, chrom)
positions_slice = positions[chrom_filter]
x_positions.append(positions_slice + x_shift)
x_shift += positions_slice.max()
log_ps_slice = -sp.log10(res['ps'][chrom_filter])
y_log_pvals.append(log_ps_slice)
m = len(positions)
log_bonf = -sp.log10(1 / (20.0 * m))
print m, log_bonf
# Plot manhattan plots?
plt.figure(figsize=(12, 4))
plt.axes([0.03, 0.1, 0.95, 0.8])
for i, chrom in enumerate(sp.unique(chromosomes)):
plt.plot(x_positions[i], y_log_pvals[i],
c=colors[i], ls='', marker='.')
xmin, xmax = plt.xlim()
plt.hlines(log_bonf, xmin, xmax, colors='k',
linestyles='--', alpha=0.5)
plt.title('%s, %s' % (phenotype, env))
plt.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_gwas_%s_%s_%s_%s.png' %
(kinship_type, phenotype, env, phen_type))
def leave_k_out_blup(num_repeats=20, num_cvs=5, genotype_file='/Users/bjarnivilhjalmsson/data/cegs_lehmann/', k_thres=0.5):
"""
"""
import h5py
import hdf5_data
import kinship
import linear_models as lm
import time
import scipy as sp
from matplotlib import pyplot as plt
import analyze_gwas_results as agr
phen_dict = hdf5_data.parse_cegs_drosophila_phenotypes()
phenotypes = ['Protein', 'Sugar', 'Triglyceride', 'weight']
envs = ['mated', 'virgin']
rep_dict = {}
for rep_i in range(num_repeats):
res_dict = {}
for phenotype in phenotypes:
env_dict = {}
for env in envs:
print phenotype, env
s1 = time.time()
# Load data..
d = hdf5_data.coordinate_cegs_genotype_phenotype(
phen_dict, phenotype, env, k_thres=k_thres)
Y_means = d['Y_means']
snps = d['snps']
assert sp.all(sp.negative(sp.isnan(snps))), 'WTF?'
K = kinship.calc_ibd_kinship(snps)
print '\nKinship calculated'
assert sp.all(sp.negative(sp.isnan(K))), 'WTF?'
n = len(Y_means)
# partition genotypes in k parts.
gt_ids = d['gt_ids']
num_ids = len(gt_ids)
chunk_size = num_ids / num_cvs
# Create k CV sets of prediction and validation data
cv_chunk_size = int((n / num_cvs) + 1)
ordering = sp.random.permutation(n)
a = sp.arange(n)
osb_ys = []
pred_ys = []
p_herits = []
for cv_i, i in enumerate(range(0, n, cv_chunk_size)):
cv_str = 'cv_%d' % cv_i
# print 'Working on CV %d' % cv_i
end_i = min(n, i + cv_chunk_size)
validation_filter = sp.in1d(a, ordering[i:end_i])
training_filter = sp.negative(validation_filter)
train_snps = snps[:, training_filter]
val_snps = snps[:, validation_filter]
train_Y = Y_means[training_filter]
val_Y = Y_means[validation_filter]
#Calc. kinship
K_train = K[training_filter, :][:, training_filter]
K_cross = K[validation_filter, :][:, training_filter]
# Do gBLUP
lmm = lm.LinearMixedModel(train_Y)
lmm.add_random_effect(K_train)
r1 = lmm.get_REML()
# Now the BLUP.
y_mean = sp.mean(lmm.Y)
Y = lmm.Y - y_mean
p_herit = r1['pseudo_heritability']
p_herits.append(p_herit)
#delta = (1 - p_herit) / p_herit
# if K_inverse == None:
# K_inverse = K.I
# M = (sp.eye(K.shape[0]) + delta * K_inverse)
# u_blup = M.I * Y
M = sp.mat(p_herit * sp.mat(K_train) +
(1 - p_herit) * sp.eye(K_train.shape[0]))
u_mean_pred = sp.array(K_cross * (M.I * Y)).flatten()
osb_ys.extend(val_Y)
pred_ys.extend(u_mean_pred)
corr = sp.corrcoef(osb_ys, pred_ys)[1, 0]
print 'Correlation:', corr
r2 = corr**2
print 'R2:', r2
mean_herit = sp.mean(p_herits)
print 'Avg. heritability:', mean_herit
env_dict[env] = {'R2': r2, 'obs_y': osb_ys,
'pred_y': pred_ys, 'corr': corr, 'avg_herit': mean_herit}
res_dict[phenotype] = env_dict
rep_dict[rep_i] = res_dict
res_hdf5_file = '/Users/bjarnivilhjalmsson/data/tmp/leave_%d_BLUP_results_kthres_%0.1f.hdf5' % (
num_cvs, k_thres)
h5f = h5py.File(res_hdf5_file)
for rep_i in range(num_repeats):
res_dict = rep_dict[rep_i]
rep_g = h5f.create_group('repl_%d' % rep_i)
for phenotype in phenotypes:
phen_g = rep_g.create_group(phenotype)
for env in envs:
d = res_dict[phenotype][env]
env_g = phen_g.create_group(env)
env_g.create_dataset('R2', data=[d['R2']])
env_g.create_dataset('corr', data=[d['corr']])
env_g.create_dataset('obs_y', data=d['obs_y'])
env_g.create_dataset('pred_y', data=d['pred_y'])
env_g.create_dataset('avg_herit', data=[d['avg_herit']])
h5f.close()
def _test_GxE_mixed_model_gwas(num_indivs=1000, num_snps=10000, num_trait_pairs=10,
plot_prefix='/Users/bjarnivilhjalmsson/tmp/test'):
"""
Test for the multiple environment mixed model
Simulates correlated trait pairs with exponentially distributed effects.
"""
import simulations
import kinship
import scipy as sp
import linear_models as lm
import gwaResults as gr
num_trait_pairs = 10
num_indivs = 200
num_snps = 10000
# Number of causal SNPs per trait (in total there may be up to twice that,
# depending on genetic correlation)
num_causals = 10
# Simulating (unlinked) genotypes and phenotype pairs w. random positive
# correlation
d = simulations.get_simulated_data(num_indivs=num_indivs, num_snps=num_snps,
num_trait_pairs=num_trait_pairs, num_causals=num_causals)
for i in range(num_trait_pairs):
# The two different phenotypes.
phen1 = d['trait_pairs'][i][0]
phen2 = d['trait_pairs'][i][1]
# Stacking up the two phenotypes into one vector.
Y = sp.hstack([phen1, phen2])
# The higher genetic correlation, the better the model fit (since we
# assume genetic correlation is 1).
print 'The genetic correlation between the two traits is %0.4f' % d['rho_est_list'][i][0, 1]
# The genotypes
sd = d['sd']
snps = sd.get_snps()
# Doubling the genotype data as well.
snps = sp.hstack([snps, snps])
# Calculating the kinship using the duplicated genotypes
K = kinship.calc_ibd_kinship(snps)
print ''
# Calculating the environment vector
E = sp.zeros((2 * num_indivs, 1))
E[num_indivs:, 0] = 1
print 'Here are the dimensions:'
print 'Y.shape: ', Y.shape
print 'snps.shape: ', snps.shape
print 'E.shape: ', E.shape
print 'K.shape: ', K.shape
mm_results = lm.emmax_w_two_env(snps, Y, K, E)
gtres = mm_results["gt_res"]
gtgres = mm_results["gt_g_res"]
gres = mm_results["g_res"]
# Figuring out which loci are causal
highlight_loci = sp.array(sd.get_chr_pos_list())[
d['causal_indices_list'][i]]
highlight_loci = highlight_loci.tolist()
highlight_loci.sort()
# Plotting stuff
res = gr.Result(scores=gtres['ps'], snps_data=sd)
res.plot_manhattan(png_file='%s_%d_gtres_manhattan.png' % (plot_prefix, i),
percentile=50, highlight_loci=highlight_loci,
plot_bonferroni=True,
neg_log_transform=True)
res.plot_qq('%s_%d_gtres_qq.png' % (plot_prefix, i))
res = gr.Result(scores=gtgres['ps'], snps_data=sd)
res.plot_manhattan(png_file='%s_%d_gtgres_manhattan.png' % (plot_prefix, i),
percentile=50, highlight_loci=highlight_loci,
plot_bonferroni=True,
neg_log_transform=True)
res.plot_qq('%s_%d_gtgres_qq.png' % (plot_prefix, i))
res = gr.Result(scores=gres['ps'], snps_data=sd)
res.plot_manhattan(png_file='%s_%d_gres_manhattan.png' % (plot_prefix, i),
percentile=50, highlight_loci=highlight_loci,
plot_bonferroni=True,
neg_log_transform=True)
res.plot_qq('%s_%d_gres_qq.png' % (plot_prefix, i))
def lotus_data_analysis(phenotype_id=1,
result_files_prefix='/Users/bjarnivilhjalmsson/Dropbox/Cloud_folder/tmp/lmm_results',
manhattan_plot_file='/Users/bjarnivilhjalmsson/Dropbox/Cloud_folder/tmp/lmm_manhattan.png',
qq_plot_file_prefix='/Users/bjarnivilhjalmsson/Dropbox/Cloud_folder/tmp/lmm_qq'):
"""
Lotus GWAS (data from Stig U Andersen)
"""
import linear_models as lm
import kinship
import gwaResults as gr
import dataParsers as dp
import phenotypeData as pd
# Load genotypes
print 'Parsing genotypes'
sd = dp.parse_snp_data(
'/Users/bjarnivilhjalmsson/Dropbox/Lotus_GWAS/20140603_NonRep.run2.vcf.matrix.ordered.csv')
# Load phenotypes
print 'Parsing phenotypes'
phend = pd.parse_phenotype_file(
'/Users/bjarnivilhjalmsson/Dropbox/Lotus_GWAS/141007_FT_portal_upd.csv')
print 'Box-cox'
phend.box_cox_transform(1)
# Coordinate phenotype of interest and genotypes. This filters the genotypes and
# phenotypes, leaving only accessions (individuals) which overlap between both,
# and SNPs that are polymorphic in the resulting subset.
print 'Coordinating data'
sd.coordinate_w_phenotype_data(phend, phenotype_id)
# Calculate kinship (IBS/IBD)
# print 'Calculating kinship'
# K = kinship.calc_ibd_kinship(sd.get_snps())
# print K
# Perform mixed model GWAS
print 'Performing mixed model GWAS'
# mm_results = lm.emmax(sd.get_snps(), phend.get_values(phenotype_id), K)
# mlmm_results = lm.mlmm(phend.get_values(phenotype_id), K, sd=sd,
# num_steps=10, file_prefix=result_files_prefix,
# save_pvals=True, pval_file_prefix=result_files_prefix)
lg_results = lm.local_vs_global_mm_scan(phend.get_values(phenotype_id), sd,
file_prefix='/Users/bjarnivilhjalmsson/Dropbox/Cloud_folder/tmp/lotus_FT_loc_glob_0.1Mb',
window_size=100000, jump_size=50000, kinship_method='ibd', global_k=None)
# # Construct a results object
print 'Processing results'
# res = gr.Result(scores=mm_results['ps'], snps_data=sd)
# Save p-values to file
# res.write_to_file(pvalue_file)
# Plot Manhattan plot
# res.plot_manhattan(png_file=manhattan_plot_file, percentile=90, plot_bonferroni=True,
# neg_log_transform=True)
# Plot a QQ-plot
# res.plot_qq(qq_plot_file_prefix)
# Local-global scan
def lotus_mixed_model_gwas(phenotype_id=4, phen_file = '/home/bjarni/LotusGenome/cks/Lotus31012019/20181113_136LjAccessionData.csv',
gt_file = '/home/bjarni/LotusGenome/cks/Lotus31012019/all_chromosomes_binary.csv',
pvalue_file='mm_results.pvals', manhattan_plot_file='mm_manhattan.png', qq_plot_file_prefix='mm_qq'):
"""
Perform mixed model (EMMAX) GWAS for Lotus data
"""
import linear_models as lm
import kinship
import gwaResults as gr
import dataParsers as dp
# Load genotypes
sd = dp.parse_snp_data(gt_file)
# Load phenotypes
import phenotypeData as pd
phend = pd.parse_phenotype_file(phen_file, with_db_ids=False)
# Coordinate phenotype of interest and genotypes. This filters the genotypes and
# phenotypes, leaving only accessions (individuals) which overlap between both,
# and SNPs that are polymorphic in the resulting subset.
sd.coordinate_w_phenotype_data(phend, phenotype_id)
# Calculate kinship (IBS)
K = kinship.calc_ibs_kinship(sd.get_snps())
# Perform mixed model GWAS
mm_results = lm.emmax(sd.get_snps(), phend.get_values(phenotype_id), K)
# Construct a results object
res = gr.Result(scores=mm_results['ps'], snps_data=sd)
# Save p-values to file
res.write_to_file(pvalue_file)
# Plot Manhattan plot
res.plot_manhattan(png_file=manhattan_plot_file, percentile=90, plot_bonferroni=True,
neg_log_transform=True)
# Plot a QQ-plot
res.plot_qq(qq_plot_file_prefix)
if __name__ == '__main__':
# lotus_data_analysis()
# _test_GxE_mixed_model_gwas()
lotus_mixed_model_gwas()
# linear_regression_gwas()
# multiple_loci_mixed_model_gwas()
pass
| 38.952118 | 133 | 0.603896 |
4a26d3711dc98b9abb95a6124380fd82743d7f67 | 99 | py | Python | bb_django_library/models.py | openbox/bb-django-library | eb4db4308ecda82125fa1f99fe6aa8cdd9e13eb7 | [
"MIT"
] | null | null | null | bb_django_library/models.py | openbox/bb-django-library | eb4db4308ecda82125fa1f99fe6aa8cdd9e13eb7 | [
"MIT"
] | 4 | 2019-10-02T21:46:28.000Z | 2021-06-10T21:09:12.000Z | bb_django_library/models.py | openbox/bb-django-library | eb4db4308ecda82125fa1f99fe6aa8cdd9e13eb7 | [
"MIT"
] | null | null | null | from django.db import models
class Foo(models.Model):
bar = models.CharField(max_length=255)
| 16.5 | 42 | 0.747475 |
4a26d5a6439a8fed7c57ca5d77fd57f95a4e2733 | 1,709 | py | Python | test/test_styles.py | Inveracity/flynt | b975b6f61893d5db1114d68fbb5d212c4e11aeb8 | [
"MIT"
] | 487 | 2019-06-10T17:44:56.000Z | 2022-03-26T01:28:19.000Z | test/test_styles.py | Inveracity/flynt | b975b6f61893d5db1114d68fbb5d212c4e11aeb8 | [
"MIT"
] | 118 | 2019-07-03T12:26:39.000Z | 2022-03-06T22:40:17.000Z | test/test_styles.py | Inveracity/flynt | b975b6f61893d5db1114d68fbb5d212c4e11aeb8 | [
"MIT"
] | 25 | 2019-07-10T08:39:58.000Z | 2022-03-03T14:44:15.000Z | import random
import pytest
from flynt.format import QuoteTypes, get_quote_type, set_quote_type
from flynt.lexer.split import get_chunks
@pytest.mark.parametrize(
argnames=["code", "quote_type"],
argvalues=[
("'abra'", QuoteTypes.single),
('"bobro"', QuoteTypes.double),
("'''abra'''", QuoteTypes.triple_single),
('"""bobro"""', QuoteTypes.triple_double),
],
)
def test_get_quote_type_token(code, quote_type):
g = get_chunks(code)
next(g)
chunk = next(g)
token = chunk.tokens[0]
assert token.get_quote_type() == quote_type
@pytest.mark.parametrize(
argnames=["code", "quote_type"],
argvalues=[
("'abra'", QuoteTypes.single),
('"bobro"', QuoteTypes.double),
("'''abra'''", QuoteTypes.triple_single),
('"""bobro"""', QuoteTypes.triple_double),
],
)
def test_get_quote_type(code, quote_type):
assert get_quote_type(code) == quote_type
@pytest.mark.parametrize(
argnames="code", argvalues=["'abra'", '"bobro"', "'''abra'''", '"""bobro"""']
)
def test_cycle(code):
assert set_quote_type(code, get_quote_type(code)) == code
@pytest.mark.parametrize(argnames="quote_type", argvalues=QuoteTypes.all)
def test_initial_doesnt_matter(quote_type):
code = random.choice(["'abra'", '"bobro"', "'''abra'''", '"""bobro"""'])
assert get_quote_type(set_quote_type(code, quote_type)) == quote_type
def test_single():
code = '"alpha123"'
expected = "'alpha123'"
assert set_quote_type(code, QuoteTypes.single) == expected
def test_single_from_triple():
code = '"""alpha123"""'
expected = "'alpha123'"
assert set_quote_type(code, QuoteTypes.single) == expected
| 25.893939 | 81 | 0.652428 |
4a26d5ed61d7b4693b755747cbdf583a47372e5d | 80 | py | Python | template/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/__init__.py | seldonPlan/pyscript | 562335b5f11c2bf1820577d143c973e58cb35f01 | [
"MIT"
] | null | null | null | template/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/__init__.py | seldonPlan/pyscript | 562335b5f11c2bf1820577d143c973e58cb35f01 | [
"MIT"
] | null | null | null | template/{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/__init__.py | seldonPlan/pyscript | 562335b5f11c2bf1820577d143c973e58cb35f01 | [
"MIT"
] | null | null | null | """{{ cookiecutter.project_name }} project description"""
__version__ = "0.0.1"
| 26.666667 | 57 | 0.7 |
4a26d62b6a1f60e7a9b3963ab4f92a435d8bebdd | 403 | py | Python | register/migrations/0019_auto_20190402_2028.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | register/migrations/0019_auto_20190402_2028.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | register/migrations/0019_auto_20190402_2028.py | LucasHiago/pede_ja | 62609a32d045b167a96be79cc93113d32dcfe917 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2019-04-02 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0018_auto_20190401_1338'),
]
operations = [
migrations.AlterField(
model_name='offlinecompensations',
name='date',
field=models.DateField(auto_now_add=True),
),
]
| 21.210526 | 54 | 0.615385 |
4a26d6d623b3f895bd16ef6cafd11e6c226bd62e | 184 | py | Python | packages/lib/python3.8/site-packages/corsheaders/models.py | sakthiRathinam/cuebook | 7f9b13fe73ed366959fdbee3ee0e60dcf5efbffb | [
"MIT"
] | null | null | null | packages/lib/python3.8/site-packages/corsheaders/models.py | sakthiRathinam/cuebook | 7f9b13fe73ed366959fdbee3ee0e60dcf5efbffb | [
"MIT"
] | 3 | 2019-02-27T11:06:34.000Z | 2021-06-10T21:11:45.000Z | env/lib/python3.6/site-packages/corsheaders/models.py | developeroka/blog_restapi | 3a4dd782c2bb71562aa59919593a3ad984bd7a9b | [
"MIT"
] | null | null | null | from django.db import models
# For signal registration
from .signals import check_request_enabled # noqa
class CorsModel(models.Model):
cors = models.CharField(max_length=255)
| 20.444444 | 50 | 0.782609 |
4a26d7a9d1d0b0ec7f9fb39fe2e48b175e11ec8c | 5,170 | py | Python | utils/image1_gluoncv.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 30 | 2021-01-29T13:57:47.000Z | 2022-02-09T13:17:57.000Z | utils/image1_gluoncv.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 13 | 2021-04-16T06:30:27.000Z | 2022-03-16T18:42:23.000Z | utils/image1_gluoncv.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 16 | 2021-04-28T06:51:58.000Z | 2022-03-23T23:47:52.000Z | """Extended image transformations to `mxnet.image`."""
from __future__ import division
import random
import numpy as np
import cv2
__all__ = ['imresize',
'random_pca_lighting', 'random_expand', 'random_flip']
def imresize(src, w, h, inter=1):
"""Resize image with OpenCV.
This is a duplicate of mxnet.image.imresize for name space consistency.
Parameters
----------
    src : numpy.ndarray
        Source image in HWC layout.
    w : int, required
        Width of resized image.
    h : int, required
        Height of resized image.
    inter : int, optional, default=1
        Interpolation method (default=cv2.INTER_LINEAR): 0 nearest, 1 linear,
        2 linear exact, 3 area, 4 cubic.
    Returns
    -------
    out : numpy.ndarray
        The resized image.
Examples
--------
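    A minimal sketch (random uint8 HWC image, purely illustrative):
    >>> import numpy as np
    >>> img = (np.random.rand(240, 320, 3) * 255).astype('uint8')
    >>> imresize(img, 160, 120).shape
    (120, 160, 3)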
"""
# oh, ow, _ = src.shape
# return mx.image.imresize(src, w, h, interp=get_interp(interp, (oh, ow, h, w)))
if inter == 0:
inter_type = cv2.INTER_NEAREST
elif inter == 1:
inter_type = cv2.INTER_LINEAR
elif inter == 2:
inter_type = cv2.INTER_LINEAR_EXACT
elif inter == 3:
inter_type = cv2.INTER_AREA
    elif inter == 4:
        inter_type = cv2.INTER_CUBIC
    else:
        # fall back to bilinear interpolation for any unrecognised flag
        inter_type = cv2.INTER_LINEAR
src = cv2.resize(src, (w,h), interpolation = inter_type)
return src
def random_pca_lighting(src, alphastd, eigval=None, eigvec=None):
"""Apply random pca lighting noise to input image.
Parameters
----------
img : mxnet.nd.NDArray
Input image with HWC format.
alphastd : float
Noise level [0, 1) for image with range [0, 255].
eigval : list of floats.
Eigen values, defaults to [55.46, 4.794, 1.148].
eigvec : nested lists of floats
Eigen vectors with shape (3, 3), defaults to
[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]].
Returns
-------
mxnet.nd.NDArray
Augmented image.
"""
if alphastd <= 0:
return src
if eigval is None:
eigval = np.array([55.46, 4.794, 1.148])
if eigvec is None:
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
alpha = np.random.normal(0, alphastd, size=(3,))
rgb = np.dot(eigvec * alpha, eigval)
    # add the per-channel noise (numpy port of the original mxnet line); note the
    # result is promoted to a float array
    src = src + rgb
return src
def random_expand(src, max_ratio=4, fill=0, keep_ratio=True):
"""Random expand original image with borders, this is identical to placing
the original image on a larger canvas.
Parameters
----------
src : mxnet.nd.NDArray
The original image with HWC format.
max_ratio : int or float
Maximum ratio of the output image on both direction(vertical and horizontal)
fill : int or float or array-like
The value(s) for padded borders. If `fill` is numerical type, RGB channels
will be padded with single value. Otherwise `fill` must have same length
as image channels, which resulted in padding with per-channel values.
keep_ratio : bool
If `True`, will keep output image the same aspect ratio as input.
Returns
-------
mxnet.nd.NDArray
Augmented image.
tuple
Tuple of (offset_x, offset_y, new_width, new_height)
"""
if max_ratio <= 1:
return src, (0, 0, src.shape[1], src.shape[0])
h, w, c = src.shape
ratio_x = random.uniform(1, max_ratio)
if keep_ratio:
ratio_y = ratio_x
else:
ratio_y = random.uniform(1, max_ratio)
oh, ow = int(h * ratio_y), int(w * ratio_x)
off_y = random.randint(0, oh - h)
off_x = random.randint(0, ow - w)
dst = np.full(shape=(oh, ow, c), fill_value=fill, dtype=src.dtype)
# make canvas
# if isinstance(fill, np.numeric_types):
# dst = np.full(shape=(oh, ow, c), val=fill, dtype=src.dtype)
# else:
# fill = np.array(fill, dtype=src.dtype)
# if not c == fill.size:
# raise ValueError("Channel and fill size mismatch, {} vs {}".format(c, fill.size))
# dst = np.tile(fill.reshape((1, c)), reps=(oh * ow, 1)).reshape((oh, ow, c))
dst[off_y:off_y+h, off_x:off_x+w, :] = src
return dst, (off_x, off_y, ow, oh)
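# Illustrative note (not part of the original module): with max_ratio=2 and
# keep_ratio=True the (h, w, c) input is pasted at a random offset onto a canvas of
# up to (2h, 2w, c) filled with `fill`, e.g.
#   canvas, (off_x, off_y, new_w, new_h) = random_expand(img, max_ratio=2, fill=114)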
def random_flip(src, px=0, py=0, copy=False):
"""Randomly flip image along horizontal and vertical with probabilities.
Parameters
----------
src : mxnet.nd.NDArray
Input image with HWC format.
px : float
Horizontal flip probability [0, 1].
py : float
Vertical flip probability [0, 1].
copy : bool
If `True`, return a copy of input
Returns
-------
mxnet.nd.NDArray
Augmented image.
tuple
Tuple of (flip_x, flip_y), records of whether flips are applied.
"""
flip_y = np.random.choice([False, True], p=[1-py, py])
flip_x = np.random.choice([False, True], p=[1-px, px])
if flip_y:
src = np.flip(src, axis=0)
if flip_x:
src = np.flip(src, axis=1)
if copy:
        src = src.copy()
return src, (flip_x, flip_y)
# | 31.717791 | 95 | 0.595938 |
4a26d91e30978b9d6e68c8810dadaa7060ec32a8 | 47 | py | Python | girvi/__init__.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | 2 | 2019-06-08T22:50:59.000Z | 2020-07-12T14:13:18.000Z | girvi/__init__.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | 13 | 2020-02-11T23:51:43.000Z | 2021-06-05T13:10:49.000Z | girvi/__init__.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | null | null | null | default_app_config = 'girvi.apps.GirviConfig'#
| 23.5 | 46 | 0.808511 |
4a26d9883adda9b724e5bc544c38e43e2e27af1f | 46,798 | py | Python | lib/transient.py | apokhr/PumpProbe-analysis | 18ddd4b5be37b48bed249d90baeabae941b72648 | [
"MIT"
] | null | null | null | lib/transient.py | apokhr/PumpProbe-analysis | 18ddd4b5be37b48bed249d90baeabae941b72648 | [
"MIT"
] | null | null | null | lib/transient.py | apokhr/PumpProbe-analysis | 18ddd4b5be37b48bed249d90baeabae941b72648 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 15 09:57:29 2017
@author: S.Y. Agustsson
"""
import os
import re
import numpy as np
import pandas as pd
import scipy.io as spio
import scipy.signal as spsignal
from matplotlib import cm, pyplot as plt
from scipy.optimize import curve_fit
from lib import utils
def main():
path = 'E:/data/RuCl3/mat/kerr_rotation/fluence_3.8K/'
series_name = 'Kerr - 4K fluence dependence'
key_parameter = 'pump_power'
files = os.listdir(path)
filepaths = []
for name in files:
filepaths.append(path + name)
series = MultiTransients(transients_list=filepaths, series_name=series_name, key_parameter=key_parameter)
print(series.key_parameter)
series.quickplot()
plt.show()
class Transient(object):
""" Creates an object(OBJ) that contains data and metadata of a single time resolved scan. The standard scan is that
obtained from a single color pump probe setup, aka 'RedRed'.
Data can be imported through use of OBJ.import_file(filepath), which imports either the raw .mat format outputted by RegaScope2012,
or the .txt format outputted by this class (called csv here due to the csv format of contained data). To output such
.txt use OBJ.export_csv(directory).
Data is contained in:
- raw_time: raw measured time scale
- raw_trace: measured data points
- time: time trace modified by analysis functions applied on this object
- trace: data modified by analysis functions applied on this object.
Metadata entries are described in __init__
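    A minimal usage sketch (hypothetical file path):
        scan = Transient(key_parameter='temperature', description='test scan')
        scan.import_file('E:/data/some_scan.mat')
        scan.quickplot()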
"""
def __init__(self, key_parameter=None, series_name=None, description=None):
"""
:param key_parameter: str, name of parameter iterated in the series this scan belongs to.
:param description: description of the scan/series.
"""
######################################
# Data #
######################################
self.raw_time = np.array([]) # time data
self.raw_trace = np.array([]) # raw trace data
self.time = np.array([]) # cleaned time axis
self.trace = np.array([]) # cleaned and modified data trace
######################################
# Metadata #
######################################
self.name = None # String used as file name for saving data
self.material = None # Material name
self.date = None # Scan date in format YYYY-MM-DD hh.mm.ss
self.original_filepath = None # Path to original raw file
# parameters
self.pump_power = None # Pump Power [mW]
self.probe_power = None # Probe Power [mW]
self.destruction_power = None # Destruction Power [mW]
# Spot size represents the FWHM diameter from Gaussian fit of the beam profile
self.pump_spot = None # Pump beam spot size [micrometers]
self.probe_spot = None # Probe beam spot size [micrometers]
self.destruction_spot = None # Destruction beam spot size [micrometers]
# Excitation densities calculated from power, spot size and repetition rate
self.pump_energy = None
self.probe_energy = None
self.destruction_energy = None
# destruction pulse parameters:
self.t12 = None # delay between pump and destruction pulses, [ps]
        # Polarizations are measured clockwise in the propagation direction of the beam; 0 = 12 o'clock
self.pump_polarization = None # Pump beam polarization [deg]
self.probe_polarization = None # Probe beam polarization [deg]
self.destruction_polarization = None # Destruction beam polariz. [deg]
self.sample_orientation = None # Sample orientation [deg]
self.temperature = None # Temperature [K]
self.R0 = None # Static reflectivity
######################################
# Analysis #
######################################
self.analysis_log = {} # Keeps track of analysis changes performed
self.fit_function = None
self.fit_parameters = None
######################################
# input info #
######################################
self.description = description
self.key_parameter = key_parameter
if self.key_parameter is not None:
self.key_parameter_value = getattr(self, self.key_parameter)
else:
self.key_parameter_value = None
self.series_name = series_name
# ignore list for metadata export. Add here any further non-metadata attributes created in this class.
self.DATA_ATTRIBUTES = ('raw_time', 'raw_trace', 'time', 'trace', 'DATA_ATTRIBUTES')
# %% metadata management
def key_parameter_value(self):
try:
self.key_parameter_value = getattr(self, str(self.key_parameter))
return self.key_parameter_value
except AttributeError:
raise AttributeError('No key parameter detected.')
def calc_energy_densities(self, rep_rate=283000):
""" recalculate metadata depending on given parameters.
it calculates energy densities
"""
beams = ['pump', 'probe', 'destruction']
for beam in beams:
if getattr(self, (beam + '_spot')) is None:
pass
else:
power = getattr(self, (beam + '_power'))
spot = getattr(self, (beam + '_spot'))
                if beam == 'pump':
                    # pump has half the rep. rate (dark control); keep it local so the
                    # probe/destruction beams still use the full repetition rate
                    beam_rep_rate = rep_rate / 2
                else:
                    beam_rep_rate = rep_rate
                energy = round(utils.get_energy_density(spot, power, beam_rep_rate), 3)
print(energy)
setattr(self, (beam + '_energy'), energy)
def input_attribute(self, attribute_name, value):
"""
manually input values for metadata attributes
:param attribute_name: name of parameter or attribute
:param value: value to assign to parameter
"""
setattr(self, attribute_name, value)
def get_metadata(self):
""" Returns a dictionary containing all metadata information available
keys are attribute names and values the corresponding value.
:rtype: dict
"""
metadata = {'analysis_log': {}} # make dict for metadata. Also create analysis_log entry, used later
attributes = self.__dict__
for key, value in attributes.items():
if key not in self.DATA_ATTRIBUTES and value is not None: # ignore time, trace and all other fields defined in data_attributes
try:
# if parameter is a number != 0 append to metadata
metadata[key] = float(attributes[key])
except (TypeError, ValueError):
metadata[key] = value
return metadata
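        # Illustrative (hypothetical) result:
        #   {'name': 'RuCl3_test_4.0_K', 'material': 'RuCl3', 'temperature': 4.0,
        #    'pump_power': 2.0, 'analysis_log': {...}, ...}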
def log_it(self, keyword, overwrite=False, *args, **kargs):
"""
Generate log entry for analysis_log.
        creates an entry under the given keyword in analysis_log, making it:
        - boolean if no args or kargs are given, flipping any previous value written in the log
- list if *args are passed
- dictionary if **kargs are passed
if overwrite is False, it appends values on previous logs,
if True, it obviously overwrites them.
        :param keyword: string with name of analysis function used
:type overwrite: bool
"""
# key_string = 'analysis_log[' + key + ']'
# make the right type of entry for the log
if kargs or args:
if kargs: # make a dictionary of parameters passed
entry = {}
for key in kargs:
entry[key] = kargs[key]
            if args:  # make a list of parameters passed
entry = []
for arg in args:
entry.append(arg)
else: # make None to trigger boolean behaviour
entry = None
# Check if previous logs with this key and eventually overwrite/append
# the new entry.
try:
# prev = getattr(self, key_string)
previous_value = self.analysis_log[keyword]
# if it finds such a key in this library, it overwrites/appends new information.
if entry == None: # trigger boolean behaviour, flipping previous registered status if available
self.analysis_log[keyword] = not previous_value
            elif isinstance(entry, list):
if overwrite:
self.analysis_log[keyword] = entry
# setattr(self, key_string, previous_value + entry)
else:
                    previous_value.append(entry)
                    self.analysis_log[keyword] = previous_value
            elif isinstance(entry, dict):
if overwrite:
self.analysis_log[keyword] = entry
else:
new_entry = {}
for key in entry:
if key in previous_value:
                            previous_value[key].append(entry[key])
                            new_entry[key] = previous_value[key]
else:
new_entry[key] = entry[key]
self.analysis_log[keyword] = new_entry
except KeyError: # rises Key error when key was not previously assigned -> no previous record of this analysis
if entry == None: # if boolean, its it's first usage, therefore set it to true
self.analysis_log[keyword] = True
else: # otherwise just append the list/dictionary.
self.analysis_log[keyword] = entry
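        # Examples of the resulting log entries (illustrative, based on the calls made
        # elsewhere in this class): log_it('Shift Time', 1.5) keeps a list of applied
        # shifts, log_it('Remove DC', window=40, shift=0.01) stores a dict of parameters,
        # and log_it('Flip Time') toggles a boolean flag.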
def give_name(self):
"""Define name attribute as material_date."""
if self.key_parameter is None:
self.key_parameter = input('What is the Key parameter for basename? ')
if self.description is None:
self.description = input('Add brief description for file name: ')
self.name = (str(self.material) + '_' +
str(self.description) + '_' +
str(getattr(self, self.key_parameter)) + '_' +
str(self.get_unit(self.key_parameter)))
def get_unit(self, parameter):
""" Returns the unit of the given parameter.
works for
- power
- polarization
- R0
- trace
- time
- energy
- temperature
        :type parameter: str
"""
splitpar = parameter.split('_')
if splitpar[-1] == 'power':
return ('mW')
elif splitpar[-1] == 'polarization' or splitpar[-1] == 'orientation':
return ('deg')
elif splitpar[-1] == 'R0':
return ('V')
elif splitpar[-1] == 'trace':
return ('')
elif splitpar[-1] == 'time':
return ('ps')
elif splitpar[-1] == 'energy':
return ('mJ/cm^2')
elif splitpar[-1] == 'temperature':
return ('K')
else:
return ('')
# %% import export
def import_file(self, filepath, cleanData=True, key_parameter=None, description=None, silent=True, **kwargs):
"""
Imports a file, .mat or .csv, using self.import_file_mat() and self.import_file_csv methods respectively.
:param filepath
path to file
:param cleanData:
            run the clean_data method, including filtering, baseline removal, setting time zero and others.
:param key_parameter
sets the key parameter
:param description
brief description of this transient, for file naming
:param silent
            if True, doesn't print anything; set to False for debugging
:param **kwargs
            all keyword args passed are set as attributes of this class instance (use to overwrite parameters).
"""
try: # if it finds the file requested...
ext = os.path.splitext(filepath)[-1].lower()
basename = os.path.basename(filepath)
if ext == '.mat':
try:
self.import_file_mat(filepath)
except TypeError:
print('Ignored incorrect matlab file: {}'.format(filepath))
#
# if basename.lower() != 't-cal.mat':
# self.import_file_mat(filepath)
# else:
# print('Ignored t-cal.mat')
elif ext == '.txt':
self.import_file_csv(filepath)
else:
print("Invalid format. Couldn't import file: " + filepath)
# self.importMetadata()
if key_parameter is not None:
self.key_parameter = key_parameter
if description is not None:
self.description = description
self.give_name()
            for attr, val in kwargs.items():
setattr(self,attr,val)
if not silent:
print('Imported {0} as {1}'.format(basename, self.name))
if cleanData and len(self.raw_time) != 0:
self.clean_data()
except TypeError as err:
print(err)
except FileNotFoundError:
print('File ' + filepath + ' not found')
def import_file_mat(self, filepath):
"""Import data from a raw .mat file generated by redred software.
extracts data about raw_time raw_trace and R0.
"""
self.original_filepath = filepath
data = spio.loadmat(filepath)
try: # if it finds the right data structure
self.raw_time = data['Daten'][2]
self.raw_trace = data['Daten'][0]
self.R0 = data['DC'][0][0]
# get all metadata from name
metadataDict = utils.get_metadata_from_name(filepath) # todo: add eventual non scripted parameters
# write metadata to relative attributes
for key in metadataDict:
try:
setattr(self, key, metadataDict[key])
except KeyError:
print('invalid key: ' + key)
except KeyError:
raise TypeError(filepath + ' is not a valid matlab scan datafile. As created by redred setup.')
def import_file_csv(self, filepath):
"""
Import data from a .txt file containing metadata in the header.
Metadata should be coded as variable names from this class:
material, date, pump_power, temperature, probe_polarization etc...
        Data expected is 4 columns: raw_time, raw_trace, time, trace.
        filepath should be the full path to the file, as a string.
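        A hypothetical header/data layout this parser understands:
            material: RuCl3
            temperature: 4.0 K
            ...
            raw_time, raw_trace, time, trace
            -1.0,0.0001,-1.0,0.0000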
"""
# ---------- get metadata ----------
# dictionary of attributes where to assign parameters
attributes = self.__dict__
parameters = []
for attribute in attributes: # use only non-data attributes
if attribute not in self.DATA_ATTRIBUTES:
parameters.append(attribute)
with open(filepath, 'r') as f:
n = 0
for l in f:
                # search for the data column header
if 'raw_time' in l:
dataOffset = n + 1 # used for offset of data fetching, data starts from next line -> +1
columnHeaders = l.replace('\n', '').replace(' ', '').split(',')
else:
n += 1
# split each line from file into a list
word = l[:-1:].split(': ')
# if the first word corresponds to an attribute name
if word[0] in parameters:
key = word[0]
value_string = word[1].replace(' ', '')
if self.get_unit(key):
# if parameter expects units, get only numbers,
                            value = float(re.findall(r"[-+]?\d*\.?\d+", value_string)[0])
else: # otherwise get the whole string
value = value_string
# create/assign attribute from imported parameter
setattr(self, key, value)
self.key_parameter_value = getattr(self, self.key_parameter)
# ---------- get data ---------- using pandas! :)
data = pd.read_csv(filepath, names=columnHeaders, skiprows=dataOffset)
for col in data.columns:
            # make a list of float type data for each dataset found
col_data = getattr(data, col).astype(float).tolist()
data_list = []
for i in col_data:
                if not np.isnan(i): data_list.append(i)
setattr(self, col, data_list)
def export_file_csv(self, directory):
"""
save Transient() to a .txt file in csv format (data)
Metadata header is in tab separated values, generated as 'name': 'value' 'unit'
data is comma separated values, as raw_time, raw_trace, time, trace.
Metadata is obtained from get_metadata(), resulting in all non0 parameters available.
"""
# ----------- metadata -----------
# self.give_name() # creates a name for the file
print('Exporting {0}'.format(self.name))
metadata = self.get_metadata()
logDict = metadata.pop('analysis_log', None) # separate log, will be printed later
logDict.pop('', None) # remove the useless empty entry
name = metadata.pop('name', None)
original_filepath = metadata.pop('original_filepath', None)
# open file with name self.name in overwrite mode
file = open(directory + name + '.txt', 'w+')
# make a title in the header
file.write('RedRed Scan\n\nMetadata\n\n')
# write metadata as: parameter: value unit
for key in metadata:
try:
line = (key + ': ' +
str(metadata[key]) + ' ' +
self.get_unit(key) + '\n')
file.write(line)
except TypeError:
print("Type error for " + key + 'when writing to file: ' +
self.name)
# write analysis log as function: values
file.write('\nAnalysis\n')
for key in logDict:
line = key + ': ' + str(logDict[key]) + '\n'
file.write(line)
# ----------- Data -----------
# Data header followed by column heads:
file.write('\n\nData\n\n')
file.write('raw_time, raw_trace, time, trace\n')
for i in range(len(self.raw_time)):
line = str(self.raw_time[i]) + ',' + str(self.raw_trace[i])
try: # try appending analysied data, or skip if it is finished
# this because of the deleting of initial and final data
line += ',' + str(self.time[i]) + ',' + str(self.trace[i])
except IndexError:
pass
finally:
line += '\n'
file.write(line)
file.close()
# %% Data manipulation
def clean_data(self, cropTimeScale=True, shiftTime=0, flipTime=True, removeDC=True, filterLowPass=True,
flipTrace=False):
"""Perform a standard set of data cleaning, good for quick plotting and test purposes."""
if cropTimeScale:
self.crop_time_scale()
if shiftTime:
self.shift_time(shiftTime)
if filterLowPass:
self.filter_low_pass()
if flipTrace:
self.flip_trace()
if removeDC:
self.remove_DC_offset()
if flipTime:
self.flip_time()
def crop_time_scale(self): # todo: fix the overwriting issue
"""chops time scale to the monotonous central behaviour, deleting the wierd ends.
ATTENTION: overwrites self.time and self.trace, deleting any previous changes"""
# clear previous time and trace, and the analysis log since it goes lost
self.analysis_log = {} # reset log
self.time = [] # reset time and trace
self.trace = []
# print('crop time scale, len: ' + str(len(self.raw_time)))
maxT = max(self.raw_time)
minT = min(self.raw_time)
# pick the type of behaviour. redred scans will always be false in this statement
if self.raw_time[0] < self.raw_time[1]:
start = 0
while self.raw_time[start] < maxT:
start += 1
end = start
while self.raw_time[end] > minT:
end += 1
i = 0
while i in range(end - start):
self.time.append(self.raw_time[i + start])
self.trace.append(self.raw_trace[i + start])
i += 1
elif self.raw_time[0] > self.raw_time[1]:
start = 0
while self.raw_time[start] > minT:
start += 1
end = start
while self.raw_time[end] < maxT:
end += 1
i = 0
while i in range(end - start):
self.time.append(self.raw_time[i + start])
self.trace.append(self.raw_trace[i + start])
i += 1
self.log_it('Crop Time Scale', maxtime=maxT, mintime=minT)
def shift_time(self, tshift):
""" Shift time scale by tshift. Changes time zero
writes to analysis_log the shifted value, or increases it if already present"""
self.time = np.array(self.time) - tshift
self.log_it('Shift Time', tshift)
def flip_time(self):
""" Flip time scale: t = -t
also reverts order in the array"""
self.time = self.time[::-1]
self.time = -np.array(self.time)
self.trace = self.trace[::-1]
self.log_it('Flip Time')
def flip_trace(self):
""" Flip the Y trace, usually not needed from matlab redred software"""
self.trace = -self.trace
self.log_it('Flip Trace')
def remove_DC_offset(self, window=40): # todo: change range in case of flipped scan!!!
"""Remove DC offset.
offset is caluclated with 40 points (~700fs) taken at negative time delays.
such delay is at the end of the scan in raw data, or at the beginning
if scan was reverted by flip_time """
try:
reverted = self.analysis_log['Flip Time']
except KeyError:
reverted = False
if reverted:
shift = np.average(self.trace[0:window:1])
else:
tpoints = len(self.time)
shift = np.average(self.trace[tpoints - window:tpoints:1])
self.trace = self.trace - shift
self.log_it('Remove DC', window=window, shift=shift)
def filter_low_pass(self, cutHigh=0.1, order=1, return_frequency=False): # todo: add different methods between which to choose
""" apply simple low pass filter to data. if return_frequency is True, returns the filter frequency value
in THz ( if time data is in ps)
This function applies a linear filter twice, once forward and once backwards.
The combined filter has linear phase.
        To avoid spikes at edges of the scan, Gustafsson's method is used:
        F. Gustafsson, “Determining the initial states in forward-backward filtering”,
Transactions on Signal Processing, Vol. 46, pp. 988-992, 1996.
"""
b, a = spsignal.butter(order, cutHigh, 'low', analog=False)
self.trace = spsignal.filtfilt(b, a, self.trace, method='gust')
frequency = utils.get_nyquist_frequency(self.time) * cutHigh
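        # e.g. (illustrative numbers): for a time axis sampled every 0.1 ps the Nyquist
        # frequency is 5 THz, so the default cutHigh=0.1 corresponds to a ~0.5 THz cutoff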
self.log_it('Low Pass Filter', frequency=frequency, nyq_factor=cutHigh, order=order)
if return_frequency:
return frequency
def normalize_to_parameter(self, parameter):
""" Normalize scan by dividing by its pump power value"""
if getattr(self, parameter):
if getattr(self, parameter) != 0:
self.trace = self.trace / getattr(self, parameter)
else:
print('Normalization failed: invalid parameter name')
logkey = 'Normalized by ' + parameter.replace('_', ' ')
self.log_it(logkey)
def quickplot(self, xlabel='Time [ps]', ylabel='Trace', fntsize=15, title='Transient', clear=False, raw=False):
"""Generates a quick simple plot with matplotlib """
if clear: plt.clf()
quickplotfig = plt.figure(num=1)
ax = quickplotfig.add_subplot(111)
if raw:
ax.plot(self.raw_time, self.raw_trace, 'o')
else:
ax.plot(self.time, self.trace, )
ax.set_xlabel(xlabel, fontsize=fntsize)
ax.set_ylabel(ylabel, fontsize=fntsize)
ax.set_title(title, fontsize=fntsize)
ax.tick_params(axis='x', labelsize=fntsize)
ax.tick_params(axis='y', labelsize=fntsize)
plt.show()
class MultiTransients(object):
""" list of transients corresponding to a certain dependence series"""
datadir = 'E:/DATA_temp'
def __init__(self, transients_list=None, series_name=None, description=None, key_parameter=None):
"""
        Initialize the series. transients_list can be a list of Transient objects or of path strings pointing to data files.
:param transients_list: list
can be a list of transient objects or of path strings pointing to data files or None
if not given, will create an empty object, where to later load data.
:param series_name: str
title of the series, used for printing on graphs for example.
:param description: str
some description.. not really used.
:param key_parameter: str
the parameter which changes throughout each scan, making it a "key_parameter" dependence series.
"""
if transients_list is None:
self.transients = []
self.key_parameter = None
self.description = None
self.series_name = None
self.material = None
self.key_parameter_list = None
self.metadata = None
else:
if type(transients_list[0]) == Transient:
self.transients = transients_list
self.import_metadata_from_transients()
elif type(transients_list[0]) in (str, dir):
self.transients = []
self.import_files(transients_list)
self.metadata = self.get_metadata()
if key_parameter is None:
self.key_parameter = self.transients[0].key_parameter
if self.key_parameter is None:
self.key_parameter = self.get_dependence_parameter()
else:
self.key_parameter = key_parameter
if description is None:
self.description = self.transients[0].description
if self.description is None:
self.description = None
else:
self.description = description
if series_name is None:
self.series_name = self.transients[0].series_name
if self.series_name is None:
self.series_name = None
else:
self.series_name = series_name
self.key_parameter_list = []
self.sort_scan_list_by_parameter()
self.update_key_parameter_list()
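        # A minimal usage sketch (hypothetical file paths / parameter name):
        #   series = MultiTransients(['scan_10K.mat', 'scan_20K.mat'],
        #                            series_name='temperature dependence',
        #                            key_parameter='temperature')
        #   series.quickplot()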
# %% Metadata
def give_name(self, name=None):
if name is None:
name = input('Choose a name for this series')
self.series_name = name
for scan in self.transients:
            scan.series_name = name
def get_dependence_parameter(self): # todo: fix it, doesnt work, unhashable dict in definition of valdic
"""find the variable parameter within the series of scans.
:returns : str name of dependence parameter, list of str if multiple dependence parameters found, also prints
message.
"""
dependence_parameter = []
for key in self.metadata:
valdic = {i: self.metadata[key].count(i) for i in frozenset(self.metadata[key])}
if len(valdic) != 1:
dependence_parameter.append(key)
if 'date' in dependence_parameter:
dependence_parameter.remove('date')
if 'originalfilename' in dependence_parameter:
dependence_parameter.remove('originalfilename')
if len(dependence_parameter) > 1:
print("Warning: multiple variables change between scans")
print('Please choose between:')
for parameter in dependence_parameter:
print(parameter)
dependence_parameter = input('\nEnter chosen key parameter: ')
if len(dependence_parameter) < len(self.transients):
print('Warning: multiple scans with same key parameter')
else:
dependence_parameter = str(dependence_parameter)
return dependence_parameter
def get_metadata(self):
"""Create a Dictionary of all metadata from all single scans.
Each entry of the dictionary represents a parameter. Its values are a
list of the value corresponding to the scan."""
# get metadata from first scan, to initialize dictionary
metadata = self.transients[0].get_metadata()
for key in metadata:
metadata[key] = [metadata[key]]
# construct a dictionary containing all metadata
skip = True # skip first entry, since it was already written during
# initialization
for transient in self.transients:
if not skip:
md = transient.get_metadata()
for key in metadata:
metadata[key].append(md[key])
else:
skip = False
return metadata
def update_transients_metadata(self):
""" assign metadata from multitransient object to each scan"""
for scan in self.transients:
scan.key_parameter = self.key_parameter
scan.description = self.description
scan.series_name = self.series_name
scan.material = self.material
# %% Import Export
def import_metadata_from_transients(self):
metadata = self.get_metadata()
try:
self.key_parameter = metadata['key_parameter'][0]
except KeyError:
pass
try:
self.material = metadata['material'][0]
except KeyError:
pass
try:
self.description = metadata['description'][0]
except KeyError:
pass
try:
self.series_name = metadata['series_name'][0]
except KeyError:
pass
def import_files(self, files, append=False, key_parameter=None, description=None):
"""imports any series of data files. Files can be:
- string of full path of a single scan
- list of full paths of a single scan
- folder from which all files will be imported
- append : if true, appends new scans to object, if false overwrites.
"""
if not append:
self.transients = [] # clear scans in memory
# check if 'files' is single file (str), list of files ([str,str,...]) or folder containing files.
if isinstance(files, str):
self.transients.append(Transient(key_parameter=key_parameter, description=description))
self.transients[-1].import_file(files)
print('Imported file ' + files)
elif isinstance(files, list) or isinstance(files, tuple):
for i in range(len(files)):
self.transients.append(Transient(key_parameter=key_parameter, description=description))
self.transients[-1].import_file(files[i])
print('Imported files form list')
elif os.path.isdir(files):
folderlist = os.listdir(files)
for i in range(len(folderlist)):
fullpath = files + '//' + folderlist[i]
self.transients.append(Transient(key_parameter=key_parameter, description=description))
self.transients[-1].import_file(fullpath)
print('Imported files form folder')
self.import_metadata_from_transients()
# self.key_parameter = self.get_dependence_parameter()
# self.sort_scan_list_by_parameter() # todo: uncomment when get dependence parmater is fixed
# %% data analysis
def saveas_csv(self, directory=None): # todo: implement dynamic paramter choosing option
""" creates a directory inside the given directory where it will save all data in csv format."""
if directory is None:
directory = utils.choose_folder('C:/Users/sagustss/py_code/DATA')
save_dir = directory + '/' + self.series_name + '_' + self.key_parameter + '/'
        if os.path.exists(save_dir):
            # append a counter to the directory name until a free one is found
            n = 1
            new_save_dir = save_dir.rstrip('/') + '_' + str(n)
            while os.path.exists(new_save_dir):
                n += 1
                new_save_dir = save_dir.rstrip('/') + '_' + str(n)
            save_dir = new_save_dir + '/'
        os.makedirs(save_dir)
self.update_transients_metadata()
for item in self.transients:
item.export_file_csv(save_dir)
def clean_data_all_scans(self, cropTimeScale=True, shiftTime=0, flipTime=True, removeDC=True, filterLowPass=True,
flipTrace=False):
"""
:return:
"""
for transient in self.transients:
transient.clean_data(cropTimeScale=cropTimeScale, shiftTime=shiftTime, flipTime=flipTime, removeDC=removeDC,
filterLowPass=filterLowPass, flipTrace=flipTrace)
def input_attribute(self, attribute_name, value):
"""
manually input values for metadata attributes
:param attribute_name: name of parameter or attribute
:param value: value to assign to parameter
"""
setattr(self, attribute_name, value)
def sort_scan_list_by_parameter(self, reverse=False):
transients_list = self.transients
parameter = self.key_parameter
# self.transients = sorted(self.transients, key=lambda transients: getattr(transients, parameter))
self.transients.sort(key=lambda x: getattr(x, parameter), reverse=reverse)
self.update_key_parameter_list()
# return sorted_list
def update_key_parameter_list(self):
self.key_parameter_list = []
for transient in self.transients:
self.key_parameter_list.append(getattr(transient, self.key_parameter))
# %% analysis
def filter_low_pass(self, cutHigh=0.1, order=2):
for item in self.transients:
item = item.filter_low_pass(cutHigh, order)
def remove_DC_offset(self):
for item in self.transients:
item = item.remove_DC_offset()
def flip_time(self):
for item in self.transients:
item = item.flip_time()
def flip_trace(self):
for item in self.transients:
item = item.flip_trace
def shift_time(self, tshift):
for item in self.transients:
item = item.shift_time(tshift)
# %% plot functions
# %% plot
def quickplot(self, figure=1):
""" simple plot of a list of transients """ # todo: move to transients.py -> under multitransients()
fig = plt.figure(num=figure)
plt.clf()
ax = fig.add_subplot(111)
ax.set_xlabel('Time [ps]', fontsize=18)
ax.set_ylabel('Differential Reflectivity', fontsize=18)
ax.set_title(self.series_name, fontsize=26)
ax.tick_params(axis='x', labelsize=12)
ax.tick_params(axis='y', labelsize=12)
# todo: make nice color iteration, that follows parameter value
colorlist_length = len(self.transients)
colorlist = plt.cm.rainbow(np.linspace(0, 1, colorlist_length))
color = iter(colorlist)
for curve in self.transients:
xdata = curve.time
ydata = curve.trace
label = str(getattr(curve, self.key_parameter)) + str(curve.get_unit(self.key_parameter))
col = next(color)
ax.plot(xdata, ydata, c=col, label=label, alpha=0.7)
return fig
def quickplot_OLD(self):
""" simple plot of a list of transients """
fig = plt.figure(num=516542)
plt.clf() #
ax = fig.add_subplot(111)
ax.set_xlabel('Time [ps]', fontsize=18)
ax.set_ylabel('Differential Reflectivity', fontsize=18)
ax.set_title(self.series_name, fontsize=26)
ax.tick_params(axis='x', labelsize=12)
ax.tick_params(axis='y', labelsize=12)
key_parameter_values = []
for transient in self.transients:
key_parameter_values.append(getattr(transient, self.key_parameter))
key_parameter_max = max(key_parameter_values)
print(key_parameter_max)
n = 1
# while key_parameter_range % 10 != 0:
# n *=10
# key_parameter_range * n
colorlist = cm.rainbow(np.linspace(0, 1, 1001))
# colorlist = cm.rainbow(np.logspace(0,3,1000)) / 100
for curve in self.transients:
# l = str(scn[i].temperature) + 'K'
xdata = curve.time
ydata = curve.trace
parameter = float(getattr(curve, self.key_parameter))
parameter_label = str('{0} {1}'.format(parameter, curve.get_unit(self.key_parameter)))
color_number = int((parameter / key_parameter_max) * 999)  # array index must be an integer
print(color_number, parameter)
ax.plot(xdata, ydata, c=colorlist[color_number], label=parameter_label, alpha=0.5)
plt.draw()
return fig
def rrPlot3d(self, Yparameter='Sample Orientation', title='3dplot', Xlabel='Time, ps',
Zlabel='Kerr rotation (mrad)',
colormap='viridis'): # todo: correct to new TransientsSet() class system
'''Plot a 3D graph with time on X, trace on Z and the selected parameter on Y.'''
# create 3 lists of X Y Z data
time = []
trace = []
ypar = []
# for every scan object takes values
for item in self.transients:
time.append(item.time)
trace.append(item.trace)
# on Y axis will be chosen parameter which exist in scan object
ypar.append(item.parameters[Yparameter][0])
# Make proper arrays from lists with data
Ypar = []
for item in range(len(self.transients[0].time)):
Ypar.append(ypar)
X = np.array(time)
Y = np.transpose(np.array(Ypar))
Z = np.array(trace)
fig = plt.figure(num=2)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=100, cmap=colormap)
ax.set_xlabel(Xlabel, fontsize=20, labelpad=20)
ax.tick_params(axis='x', labelsize=20)
ax.tick_params(axis='y', labelsize=20)
ax.tick_params(axis='z', labelsize=20)
ax.set_ylabel(Yparameter, fontsize=20, labelpad=20)
ax.set_zlabel(Zlabel, fontsize=20, labelpad=20)
ax.set_title(title, fontsize=40)
plt.show()
def fit_transients(self, fit_function, parameters, fit_from=0, fit_to=0, method='curve_fit', ext_plot=None,
print_results=True, recursive_optimization=False, colorlist=None, saveDir=None):
"""
Fit given model to a series of Transients.
:param fit_function:
Model which will be fitted to the data
:param parameters: list, (list of lists - no longer supported)
Initial parameters for the given function
:param fit_from: int
Index of the first data point (on the x axis) used in the fit.
:param fit_to: int
Number of data points dropped from the end of the trace before fitting.
:param method: function
Fitting method used: supports 'curve_fit'
:param recursive_optimization: bool
If true, it uses the optimized fit from previous cycle to initialize the next fitting
:param ext_plot: bool
if true, plots the results in a matplotlib figure
:param print_results: bool
if true prints fitting results in console while being obtained.
:return all_popt: dict
dictionary with transient label as key and fit optimized parameters as values
:return all_pcov: dict
"""
if ext_plot is None:
fig = plt.figure('Fit of transients')
plt.clf()
ax = fig.add_subplot(111)
ax.set_xlabel('Time [ps]', fontsize=18)
ax.set_ylabel('Differential Reflectivity', fontsize=18)
ax.set_title(self.series_name, fontsize=26)
ax.tick_params(axis='x', labelsize=12)
ax.tick_params(axis='y', labelsize=12)
if colorlist is None:
colorlist_length = len(self.transients)
colorlist = plt.cm.rainbow(np.linspace(0, 1, colorlist_length))
else:
ax = ext_plot
color = iter(colorlist)
all_popt = {} # dict type output
all_pcov = {}
key_parameter_values = []
fit_parameters_data = {} # dict of Data type output
try:
if len(parameters[0]) > 1:
pars = parameters[0]
except TypeError:
pars = parameters
for i, fit_parameter in enumerate(pars):
fit_parameters_data['par{}'.format(i)] = []
last_popt = parameters
for i, transient in enumerate(self.transients):
fit_end = -fit_to if fit_to else None  # a "-0" end index would produce an empty slice
xdata = transient.time[fit_from:fit_end]
ydata = transient.trace[fit_from:fit_end]
key_parameter_values.append(transient.key_parameter_value)
key_parameter = transient.key_parameter
label = '{0} {1}'.format(transient.key_parameter_value, transient.get_unit(transient.key_parameter))
try:
if len(parameters[0]) > 1:
guess = parameters[i]
except TypeError:
if recursive_optimization:
guess = last_popt
else:
guess = parameters
all_popt[label] = []
all_pcov[label] = []
if method == 'curve_fit':
try:
popt, pcov = curve_fit(fit_function, xdata, ydata, p0=guess)
if recursive_optimization:
last_popt = popt
if print_results:
print('{0}: popt: {1}'.format(label, popt))
col = next(color)
ax.plot(xdata, fit_function(xdata, *popt), '--', c=col)
ax.plot(xdata, ydata, c=col, label=label, alpha=0.5)
all_popt[label] = popt
all_pcov[label] = pcov
for i, item in enumerate(popt):
fit_parameters_data['par{}'.format(i)].append(item)
except RuntimeError:
print('no fit parameters found for transient: {}'.format(label))
elif method == 'fmin':
print('fmin not yet implemented') # todo: add support for fmin
for key, value in fit_parameters_data.items():
fit_parameters_data[key] = Data(key_parameter_values, value, key_parameter, key)
if ext_plot:
pass
# plt.show()
if saveDir is not None:
pass
return all_popt, all_pcov, fit_parameters_data
class Data(object):
""" This object stores data obtained from a fit such as decay times, amplitudes etc and provides analysis tools"""
def __init__(self, x, y, x_label, y_label):
""" initialization"""
self.x_data = x
self.y_data = y
self.x_data = np.array(self.x_data)
self.y_data = np.array(self.y_data)
self.x_label = x_label
self.y_label = y_label
def quickplot(self, title='Dependence', clear=False, plt_handle=None, show=True, *args, **kargs):
"""
:param title: str
window title
:param clear:
if true clears the figure before replotting
:param plt_handle:
handle to which to add the plot, used to plot in a pre-existant figure
:param show:
if true, uses plt.show()
:return:
"""
if plt_handle is None:
fig = plt.figure(num=title)
ax = fig.add_subplot(111)
else:
ax = plt_handle
ax.scatter(self.x_data, self.y_data, *args, **kargs)
ax.set_xlabel(self.x_label, fontsize=15)
ax.set_ylabel(self.y_label, fontsize=15)
ax.set_title(title, fontsize=15)
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
if show:
plt.show()
def fit_data(self, fit_function, initial_parameters, x_range=(0, 0)):
"""
:param fit_function: func
function used for data fitting
:param initial_parameters: list
initial parameters for initializing the fitting procedure
:param x_range: [int,int]
limits to use for fitting [ start , stop ]
:param ydata: list or np.array
y_data, takes precedence over self.y_data
:return popt:
optimized parameters
:return pcov:
"""
if x_range == (0, 0):
x_range = (0, len(self.x_data))
x_data = self.x_data[x_range[0]:x_range[1]]
y_data = self.y_data[x_range[0]:x_range[1]]  # slice y to match the fitted x range
popt, pcov = curve_fit(fit_function, x_data, y_data, p0=initial_parameters)
return popt, pcov
class FitFunction(object):
""" wrapper class for fit functions"""
def __init__(self):
""" """
@staticmethod
def double_exponential_pos_neg(x, A1, t1, A2, t2, c, d):
labels = ['A1', 't1', 'A2', 't2', 'c', 'd']
func = A1 * (1 - np.exp(- x / t1)) - A2 * (1 - np.exp(- x / t2)) + c * x + d
return func, labels
@staticmethod
def expfunc_const(x, A, t0, c):
labels = ['A', 't0', 'c']
func = A * (1 - np.exp(- x / t0)) + c
return func, labels
@staticmethod
def expFunc_lin_const(x, A, t0, c, d):
labels = ['A', 't0', 'c', 'd']
func = A * (1 - np.exp(- x / t0)) + c * x + d
return func, labels
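# Note on using these wrappers with scipy.optimize.curve_fit (a hedged sketch, not
# part of the original module): curve_fit expects the model to return only the y
# values, while the static methods above return a (values, labels) tuple, so they
# would need to be unpacked first, e.g.
#
#   model = lambda x, A, t0, c: FitFunction.expfunc_const(x, A, t0, c)[0]
#   popt, pcov = curve_fit(model, xdata, ydata, p0=[1.0, 10.0, 0.0])  # p0 values are illustrative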
if __name__ == "__main__":
main()
| 39.625741 | 139 | 0.577952 |
4a26da5d16c3fdb6208d04675f9a5f5076f4a9ac | 24,202 | py | Python | resources/fixtures/scripts/fixtures-tool.py | janosvitok/qlcplus | 0a2e9302de8349d74a54a1a78266d58084c6b818 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-05-11T06:20:08.000Z | 2017-05-11T06:20:08.000Z | resources/fixtures/scripts/fixtures-tool.py | janosvitok/qlcplus | 0a2e9302de8349d74a54a1a78266d58084c6b818 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | resources/fixtures/scripts/fixtures-tool.py | janosvitok/qlcplus | 0a2e9302de8349d74a54a1a78266d58084c6b818 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import re
import argparse
import lxml.etree as etree
singleCapCount = 0
namespace = "http://www.qlcplus.org/FixtureDefinition"
# please see https://github.com/mcallegari/qlcplus/wiki/Fixture-definition-presets when changing this list
def getPresetsArray():
return [
"Custom",
"IntensityMasterDimmer", "IntensityMasterDimmerFine", "IntensityDimmer", "IntensityDimmerFine",
"IntensityRed", "IntensityRedFine", "IntensityGreen", "IntensityGreenFine", "IntensityBlue", "IntensityBlueFine",
"IntensityCyan", "IntensityCyanFine", "IntensityMagenta", "IntensityMagentaFine", "IntensityYellow", "IntensityYellowFine",
"IntensityAmber", "IntensityAmberFine", "IntensityWhite", "IntensityWhiteFine", "IntensityUV", "IntensityUVFine",
"IntensityIndigo", "IntensityIndigoFine", "IntensityLime", "IntensityLimeFine", "IntensityHue", "IntensityHueFine",
"IntensitySaturation", "IntensitySaturationFine", "IntensityLightness", "IntensityLightnessFine",
"IntensityValue", "IntensityValueFine",
"PositionPan", "PositionPanFine", "PositionTilt", "PositionTiltFine", "PositionXAxis", "PositionYAxis",
"SpeedPanSlowFast", "SpeedPanFastSlow", "SpeedTiltSlowFast", "SpeedTiltFastSlow", "SpeedPanTiltSlowFast", "SpeedPanTiltFastSlow",
"ColorMacro", "ColorWheel", "ColorWheelFine", "ColorRGBMixer", "ColorCTOMixer", "ColorCTCMixer", "ColorCTBMixer",
"GoboWheel", "GoboWheelFine", "GoboIndex", "GoboIndexFine",
"ShutterStrobeSlowFast", "ShutterStrobeFastSlow", "ShutterIrisMinToMax", "ShutterIrisMaxToMin", "ShutterIrisFine"
"BeamFocusNearFar", "BeamFocusFarNear", "BeamFocusFine", "BeamZoomSmallBig", "BeamZoomBigSmall", "BeamZoomFine",
"PrismRotationSlowFast", "PrismRotationFastSlow",
"NoFunction" ]
def printPresets(group):
presets = getPresetsArray()
pMin = 1
pMax = presets.index("NoFunction")
if group == "Intensity":
pMin = presets.index("IntensityMasterDimmer")
pMax = presets.index("IntensityValueFine")
elif group == "Pan":
pMin = presets.index("PositionPan")
pMax = presets.index("PositionPanFine")
elif group == "Tilt":
pMin = presets.index("PositionTilt")
pMax = presets.index("PositionTiltFine")
elif group == "Speed":
pMin = presets.index("SpeedPanSlowFast")
pMax = presets.index("SpeedPanTiltFastSlow")
elif group == "Colour":
pMin = presets.index("ColorMacro")
pMax = presets.index("ColorCTBMixer")
elif group == "Gobo":
pMin = presets.index("GoboWheel")
pMax = presets.index("GoboIndexFine")
elif group == "Shutter":
pMin = presets.index("ShutterStrobeSlowFast")
pMax = presets.index("ShutterIrisFine")
elif group == "Beam":
pMin = presets.index("BeamFocusNearFar")
pMax = presets.index("BeamZoomFine")
elif group == "Prism":
pMin = presets.index("PrismRotationSlowFast")
pMax = presets.index("PrismRotationFastSlow")
for i in range(pMin, pMax + 1):
sys.stdout.write("[" + str(i) + "] " + presets[i] + " ")
sys.stdout.flush()
print ""
###########################################################################################
# update_fixture
#
# Convert an 'old' syntax definition to the 'new' syntax, which includes:
# - single capability channels
# - global physical dimension
#
# path: the source path with the fixtures to convert
# filename: the relative file name
# destpath: the destination folder where to save the converted fixture
###########################################################################################
def update_fixture(path, filename, destpath):
absname = os.path.join(path, filename)
parser = etree.XMLParser(ns_clean=True, recover=True)
xmlObj = etree.parse(absname, parser=parser)
root = xmlObj.getroot()
fxSingleCapCount = 0
global namespace
################################## PHYSICAL PROCESSING ################################
global_phy = {}
gphy_tag = etree.Element("Physical")
for mode in root.findall('{' + namespace + '}Mode'):
phy_dict = {}
phy_tag = mode.find('{' + namespace + '}Physical')
if not phy_tag:
# Mode already processed. Skip
continue
bulb_tag = phy_tag.find('{' + namespace + '}Bulb')
dim_tag = phy_tag.find('{' + namespace + '}Dimensions')
lens_tag = phy_tag.find('{' + namespace + '}Lens')
focus_tag = phy_tag.find('{' + namespace + '}Focus')
tech_tag = phy_tag.find('{' + namespace + '}Technical')
phy_dict.update(phy_tag.attrib)
phy_dict.update(bulb_tag.attrib)
phy_dict.update(dim_tag.attrib)
phy_dict.update(lens_tag.attrib)
phy_dict.update(focus_tag.attrib)
if tech_tag:
phy_dict.update(tech_tag.attrib)
if not global_phy:
global_phy = phy_dict
gphy_tag = phy_tag
mode.remove(phy_tag)
print "Moving mode " + mode.attrib['Name'] + " to global"
elif phy_dict == global_phy:
mode.remove(phy_tag)
print "Mode " + mode.attrib['Name'] + " is identical to global"
root.append(gphy_tag)
##################################### CHANNELS PROCESSING #################################
for channel in root.findall('{' + namespace + '}Channel'):
locCapCount = 0
if 'Preset' in channel.attrib:
# Channel already converted. Skip
continue
for cap in channel.findall('{' + namespace + '}Capability'):
locCapCount += 1
if locCapCount < 2:
print "Single capability found in " + filename
fxSingleCapCount += 1
preset = ""
name = ""
group = ""
color = ""
controlByte = 0
fineWord = ""
# Modes have a <Channel> tag too, but they don't have a name
if not 'Name' in channel.attrib:
continue
name = channel.attrib['Name']
grpNode = channel.find('{' + namespace + '}Group')
if grpNode is not None:
group = grpNode.text
controlByte = int(grpNode.attrib['Byte'])
if controlByte == 1:
fineWord = "Fine"
if group == "Intensity":
colNode = channel.find('{' + namespace + '}Colour')
if colNode is not None:
color = colNode.text
if color == "Red":
preset = "IntensityRed" + fineWord
elif color == "Green":
preset = "IntensityGreen" + fineWord
elif color == "Blue":
preset = "IntensityBlue" + fineWord
elif color == "Cyan":
preset = "IntensityCyan" + fineWord
elif color == "Magenta":
preset = "IntensityMagenta" + fineWord
elif color == "Yellow":
preset = "IntensityYellow" + fineWord
elif color == "Amber":
preset = "IntensityAmber" + fineWord
elif color == "White":
preset = "IntensityWhite" + fineWord
elif color == "UV":
preset = "IntensityUV" + fineWord
elif color == "Lime":
preset = "IntensityLime" + fineWord
elif color == "Indigo":
preset = "IntensityIndigo" + fineWord
elif color == "Hue":
preset = "IntensityHue" + fineWord
elif color == "Saturation":
preset = "IntensitySaturation" + fineWord
elif color == "Lightness":
preset = "IntensityLightness" + fineWord
elif color == "Value":
preset = "IntensityValue" + fineWord
elif group == "Pan":
preset = "PositionPan" + fineWord
elif group == "Tilt":
preset = "PositionTilt" + fineWord
#print "Found group " + group + ", control byte: " + str(controlByte)
print chr(27) + "[2J" # clear screen
print "File: " + filename
print etree.tostring(channel)
if not preset:
printPresets(group)
select = ""
# wait for user input until a preset can be resolved
while 1:
select = raw_input("Replacement preset code (0 = keep) (enter = " + preset + "): ")
if select == "":
if preset == "":
print "Select an option!"
else:
break
else:
presets = getPresetsArray()
preset = presets[int(select)]
break
# perform the XML Channel node replacement
if preset != "Custom":
channel.clear()
channel.set("Name", name)
channel.set("Preset", preset)
channel.tail = "\n "
newfile = os.path.join(destpath, filename)
xmlFile = open(newfile, "w")
xmlFile.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8", doctype="<!DOCTYPE FixtureDefinition>"))
xmlFile.close()
return fxSingleCapCount
def check_physical(absname, node, hasPan, hasTilt):
errNum = 0
phy_tag = node.find('{' + namespace + '}Physical')
if phy_tag is not None:
dim_tag = phy_tag.find('{' + namespace + '}Dimensions')
focus_tag = phy_tag.find('{' + namespace + '}Focus')
tech_tag = phy_tag.find('{' + namespace + '}Technical')
width = int(dim_tag.attrib.get('Width', 0))
height = int(dim_tag.attrib.get('Height', 0))
depth = int(dim_tag.attrib.get('Depth', 0))
panDeg = int(focus_tag.attrib.get('PanMax', 0))
tiltDeg = int(focus_tag.attrib.get('TiltMax', 0))
if width == 0 or height == 0 or depth == 0:
print absname + ": Invalid physical dimensions detected"
errNum += 1
if hasPan and panDeg == 0:
print absname + ": Invalid PAN degrees"
errNum += 1
if hasTilt and tiltDeg == 0:
print absname + ": Invalid TILT degrees"
errNum += 1
if tech_tag is not None:
power = int(tech_tag.attrib.get('PowerConsumption', 0))
if power == 0:
print absname + ": Invalid power consumption"
errNum += 1
return errNum
###########################################################################################
# validate_fixture
#
# Check the syntax of a definition and reports errors if found
#
# path: the source path with the fixtures to validate
# filename: the relative file name
###########################################################################################
def validate_fixture(path, filename):
absname = os.path.join(path, filename)
parser = etree.XMLParser(ns_clean=True, recover=True)
xmlObj = etree.parse(absname, parser=parser)
root = xmlObj.getroot()
global namespace
errNum = 0
hasPan = False
hasTilt = False
needSave = False
##################################### CHECK CREATOR #################################
creator_tag = root.find('{' + namespace + '}Creator')
if creator_tag is None:
print "Creator tag not found"
else:
author_tag = creator_tag.find('{' + namespace + '}Author')
name_tag = creator_tag.find('{' + namespace + '}Name')
version_tag = creator_tag.find('{' + namespace + '}Version')
numversion_tok = re.findall('\d+', version_tag.text)
#print "Definition version: " + version_tag.text
# extract a unified number from the QLC version string
if len(numversion_tok) == 3:
qlc_version = (int(numversion_tok[0]) * 10000) + (int(numversion_tok[1]) * 100) + int(numversion_tok[2])
else:
qlc_version = (int(numversion_tok[0]) * 10000) + (int(numversion_tok[1]) * 100)
if author_tag is None:
print absname + ": Author tag not found"
errNum += 1
else:
# pre QLC+ definition didn't have the Author tag. Let's do
# the following check only for newer defs
if name_tag.text == "Q Light Controller Plus":
if not author_tag.text:
print absname + ": Empty author name detected"
errNum += 1
else:
authName = author_tag.text
if "@" in authName or "://" in authName or "www" in authName:
print absname + ": URLs or emails not allowed in author tag"
errNum += 1
################################ CHECK FIXTURE GENERALS ##############################
manuf_tag = root.find('{' + namespace + '}Manufacturer')
model_tag = root.find('{' + namespace + '}Model')
type_tag = root.find('{' + namespace + '}Type')
if manuf_tag is None or not manuf_tag.text:
print absname + ": Invalid manufacturer detected"
errNum += 1
if model_tag is None or not model_tag.text:
print absname + ": Invalid model detected"
errNum += 1
if type_tag is None or not type_tag.text:
print absname + ": Invalid type detected"
errNum += 1
##################################### CHECK CHANNELS #################################
chCount = 0
channelNames = []
for channel in root.findall('{' + namespace + '}Channel'):
chName = ""
chPreset = ""
if not 'Name' in channel.attrib:
print absname + ": Invalid channel. No name specified"
errNum += 1
else:
chName = channel.attrib['Name']
channelNames.append(chName)
if 'Preset' in channel.attrib:
chPreset = channel.attrib['Preset']
childrenCount = len(channel.getchildren())
group_tag = channel.find('{' + namespace + '}Group')
groupByte = -1
if not chPreset and childrenCount == 0:
print absname + "/" + chName + ": Invalid channel. Not a preset and no capabilities found"
errNum += 1
if not chPreset and group_tag is None:
print absname + "/" + chName + ": Invalid channel. Not a preset and no group tag found"
errNum += 1
if group_tag is not None:
if not group_tag.text:
print absname + "/" + chName + ": Invalid channel. Empty group tag detected"
errNum += 1
else:
if group_tag.text == 'Pan':
hasPan = True
if group_tag.text == 'Tilt':
hasTilt = True
if not 'Byte' in group_tag.attrib:
print absname + "/" + chName + ": Invalid channel. Group byte attribute not found"
errNum += 1
else:
groupByte = group_tag.attrib['Byte']
if chPreset:
# no need to go further is this is a preset
chCount += 1
continue
# check the word 'fine' against control byte
if groupByte == 0 and 'fine' in chName:
print absname + "/" + chName + ": control byte should be set to Fine (LSB)"
errNum += 1
################################# CHECK CAPABILITIES ##############################
rangeMin = 255
rangeMax = 0
lastMax = -1
capCount = 0
for capability in channel.findall('{' + namespace + '}Capability'):
newResSyntax = False
capName = capability.text
if not capName:
print absname + "/" + chName + ": Capability with no description detected"
errNum += 1
# check capabilities overlapping
currMin = int(capability.attrib['Min'])
currMax = int(capability.attrib['Max'])
#print "Min: " + str(currMin) + ", max: " + str(currMax)
if currMin <= lastMax:
print absname + "/" + chName + "/" + capName + ": Overlapping values detected " + str(currMin) + "/" + str(lastMax)
errNum += 1
# disabled for now. 710 errors with this !
#if currMin != lastMax + 1:
# print absname + "/" + chName + "/" + capName + ": Non contiguous range detected " + str(currMin) + "/" + str(lastMax)
# errNum += 1
lastMax = currMax
resource = capability.attrib.get('Res', "")
# try and see if the new syntax is on
if not resource:
resource = capability.attrib.get('Res1', "")
newResSyntax = True
if resource.startswith('/'):
print absname + "/" + chName + "/" + capName + ": Absolute paths not allowed in resources"
errNum += 1
# check the actual existence of a gobo. If possible, migrate to SVG
if resource and '/' in resource:
goboPath = os.getcwd() + "/../gobos/" + resource
#print "GOBO path: " + goboPath
if not os.path.isfile(goboPath):
# check if a SVG version of the gobo exists
resource = resource.replace('png', 'svg')
goboPath = os.getcwd() + "/../gobos/" + resource
if not os.path.isfile(goboPath):
print absname + "/" + chName + "/" + capName + ": Non existing gobo file detected (" + resource + ")"
errNum += 1
else:
needSave = True
if newResSyntax:
capability.set('Res1', resource)
else:
capability.set('Res', resource)
capCount += 1
if capCount == 0:
print absname + "/" + chName + ": Channel has no capabilities"
errNum += 1
chCount += 1
if chCount == 0:
print absname + ": Invalid fixture. No channels found!"
errNum += 1
###################################### CHECK MODES ###################################
modeCount = 0
for mode in root.findall('{' + namespace + '}Mode'):
modeName = ""
if not 'Name' in mode.attrib:
print absname + ": mode name attribute not found"
errNum += 1
else:
modeName = mode.attrib['Name']
if not modeName:
print absname + ": Empty mode name detected"
errNum += 1
# better to skip this for now. Still too many errors
#if qlc_version >= 41100 and 'mode' in modeName.lower():
# print absname + "/" + modeName + ": word 'mode' found in mode name"
# errNum += 1
modeChanCount = 0
for mchan in mode.findall('{' + namespace + '}Channel'):
if mchan.text is None:
print absname + "/" + modeName + ": Empty channel name found. This definition won't work."
errNum += 1
else:
if not mchan.text in channelNames:
print absname + "/" + modeName + ": Channel " + mchan.text + " doesn't exist. This definition won't work."
errNum += 1
modeChanCount += 1
if modeChanCount == 0:
print absname + "/" + modeName + ": No channel found in mode"
errNum += 1
errNum += check_physical(absname, mode, hasPan, hasTilt)
modeCount += 1
if modeCount == 0:
print absname + ": Invalid fixture. No modes found!"
errNum += 1
################################ CHECK GLOBAL PHYSICAL ################################
errNum += check_physical(absname, root, hasPan, hasTilt)
if needSave:
print "Saving back " + filename + "..."
xmlFile = open(absname, "w")
xmlFile.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8", doctype="<!DOCTYPE FixtureDefinition>"))
xmlFile.close()
return errNum
###########################################################################################
# createFixtureMap
#
# Creates the Fixture definition map read by QLC+ at startup
#
###########################################################################################
def createFixtureMap():
global namespace
count = 0
manufacturer = ""
xmlFile = open("FixturesMap.xml", "w")
root = etree.Element("FixturesMap")
root.set('xmlns', 'http://www.qlcplus.org/FixturesMap')
for dirname in sorted(os.listdir('.'), key=lambda s: s.lower()):
if not os.path.isdir(dirname): continue
if dirname != "scripts" and dirname != manufacturer:
mfTag = etree.SubElement(root, "M")
mfTag.set('n', dirname)
manufacturer = dirname
for filename in sorted(os.listdir(dirname), key=lambda s: s.lower()):
if not filename.endswith('.qxf'): continue
parser = etree.XMLParser(ns_clean=True, recover=True)
xmlObj = etree.parse(os.path.join(dirname, filename), parser=parser)
fxRoot = xmlObj.getroot()
manufacturer = fxRoot.find('{' + namespace + '}Manufacturer')
model = fxRoot.find('{' + namespace + '}Model')
fxTag = etree.SubElement(mfTag, "F")
fxTag.set('n', os.path.splitext(filename)[0])
#fxTag.set('m', manufacturer.text)
fxTag.set('m', model.text)
#print manufacturer.text + ", " + model.text
count += 1
xmlFile.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8", doctype="<!DOCTYPE FixturesMap>"))
xmlFile.close()
print "Fixtures in map: " + str(count)
###########################################################################################
#
# MAIN
#
###########################################################################################
parser = argparse.ArgumentParser(description='Unified Fixture tool.')
parser.add_argument('--map', help='Create the Fixture map', action='store_true')
parser.add_argument('--convert [source] [destination]', help='Convert an "old" syntax Fixture definition',
nargs='*', dest='convert')
parser.add_argument('--validate [path]', help='Validate fixtures in the specified path',
nargs='*', dest='validate')
args = parser.parse_args()
print args
if args.map:
createFixtureMap()
elif args.convert:
if len(sys.argv) < 3:
print "Usage " + sys.argv[0] + "--convert [source folder] [destination folder]"
sys.exit()
path = sys.argv[2]
if len(sys.argv) > 3:
destpath = sys.argv[3]
else:
destpath = ""
print "Converting fixtures in " + path + "..."
for filename in os.listdir(path):
if not filename.endswith('.qxf'): continue
print "Processing file " + filename
singleCapCount += update_fixture(path, filename, destpath)
print "Scan done. Single cap found: " + str(singleCapCount)
elif args.validate:
if len(sys.argv) < 2:
print "Usage " + sys.argv[0] + "--validate [path]"
sys.exit()
path = sys.argv[2]
fileCount = 0
errorCount = 0
for dirname in sorted(os.listdir(path), key=lambda s: s.lower()):
if not os.path.isdir(dirname): continue
for filename in sorted(os.listdir(dirname), key=lambda s: s.lower()):
if not filename.endswith('.qxf'): continue
#print "Processing file " + filename
errorCount += validate_fixture(dirname, filename)
fileCount += 1
print str(fileCount) + " definitions processed. " + str(errorCount) + " errors detected"
| 37.348765 | 142 | 0.530493 |
4a26db0070c53565e5ba657ff3ed6586084bbb6d | 492 | py | Python | pyscript/math/matplotlib_plot.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | 1 | 2021-06-07T09:01:21.000Z | 2021-06-07T09:01:21.000Z | pyscript/math/matplotlib_plot.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | null | null | null | pyscript/math/matplotlib_plot.py | airy-ict/learn_python | 5a6c45c627208856bb04c2545fae8cba903519d3 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # set the default font
plt.rcParams['axes.unicode_minus'] = False # keep the minus sign '-' from rendering as a box in saved figures
x = np.linspace(0, 10, 1000)
y = np.sin(x)
z = np.cos(x**2)
plt.figure(figsize=(8,4))
plt.plot(x,y,label="$sin(x)$",color="red",linewidth=4)
plt.plot(x,z,"b--",label="$cos(x^2)$")
plt.xlabel("Time(s)")
plt.ylabel("Volt")
plt.title("PyPlot 曲线 Demo")
plt.ylim(-1.2,1.2)
plt.legend()
plt.show()
| 23.428571 | 66 | 0.648374 |
4a26db992214ad8f5de8744c67320c661eff22d0 | 4,122 | py | Python | main/utils/dto.py | keshavbohra/movie-bookie | 3ca4f855bfaad6b2fa859a6feed8f2d512513b69 | ["MIT"] | null | null | null | main/utils/dto.py | keshavbohra/movie-bookie | 3ca4f855bfaad6b2fa859a6feed8f2d512513b69 | ["MIT"] | 1 | 2021-09-06T11:49:05.000Z | 2021-09-06T11:49:05.000Z | main/utils/dto.py | keshavbohra/movie_bookie | 3ca4f855bfaad6b2fa859a6feed8f2d512513b69 | ["MIT"] | null | null | null |
from flask_restx import Namespace, fields
screening_marshaller = {
'screening_start': fields.DateTime(required=True, description='Screening start time'),
'screening_end': fields.DateTime(required=True, description='Screening end time'),
'seats_remain': fields.Integer(required=True, description='Remaining Seats')
}
booking_marshaller = {
'booking_id': fields.Integer(attribute='id', required=True, description='Booking id'),
'movie_name': fields.String(required=True, description='Movie Name'),
'theatre_name': fields.String(required=True, description='Theatre Name'),
'theatre_city': fields.String(required=True, description='Theatre City'),
'screening_time': fields.DateTime(attribute='screening_start', required=True, description='Screening Start'),
'movie_duration': fields.Integer(required=True, description='Duration of the movie.'),
'total_seats': fields.Integer(required=True, description='Seats Booked')
}
screening_book_marshaller = {
'screening_id': fields.Integer(attribute='id', required=True, description='Screening id'),
'movie_id': fields.Integer(attribute='id', required=True, description='Movie id'),
'theatre_id': fields.Integer(attribute='id', required=True, description='Theatre id'),
'seats': fields.Integer(required=True, description='Seats Booked')
}
movie_marshaller = {
'movie_id': fields.Integer(attribute='id', required=False, description='Movie id'),
'movie_name': fields.String(required=True, description='Movie name'),
'movie_duration': fields.Integer(required=True, description='Movie duration'),
'poster_url': fields.String(required=True, description='Poster URL')
}
theatre_marshaller = {
'theatre_id': fields.Integer(attribute='id', required=False, description='Theatre id'),
'theatre_name': fields.String(required=True, description='Theatre name'),
'theatre_city': fields.String(required=True, description='Theatre duration'),
'seats_num': fields.Integer(attribute='seats_remain', required=True, description='Total Seats')
}
user_marshaller = {
'username': fields.String(attribute='user_name', required=True, description='user username'),
'email': fields.String(required=True, description='user email address'),
'password': fields.String(required=True, description='user password'),
'public_id': fields.String(required=False, description='user Identifier')
}
user_update_marshaller = {
'email': fields.String(required=True, description='user email address'),
'admin': fields.Boolean(required=True, description='User role flag')
}
auth_marshaller = {
'email': fields.String(required=True, description='The email address'),
'password': fields.String(required=True, description='The user password '),
}
class MovieDto:
api = Namespace('movie', description='Movie related operations')
movie = api.model('movie_obj', movie_marshaller)
class BookingDto:
api = Namespace('booking', description='Ticket Booking related operations')
screening_model = api.model('screening_book', screening_book_marshaller)
booking_model = api.model('booking', booking_marshaller)
class TheatreDto:
api = Namespace('theatre', description='Theatre related operations')
theatre = api.model('theatre_obj', theatre_marshaller)
class ScreeningDto:
api = Namespace('screening', description='Screening related operations')
screening = api.model('screening', screening_marshaller)
movie_screening_marshaller = {**screening_marshaller, **theatre_marshaller}
movie_screening = api.model('movie_screening', movie_screening_marshaller)
theatre_screening_marshaller = {**screening_marshaller, **movie_marshaller}
theatre_screening = api.model('theatre_screening', theatre_screening_marshaller)
class UserDto:
api = Namespace('user', description='user related operations')
user = api.model('user', user_marshaller)
user_update = api.model('user_update', user_update_marshaller)
class AuthDto:
api = Namespace('auth', description='authentication related operations')
user_auth = api.model('auth_details', auth_marshaller)
| 47.930233 | 113 | 0.74721 |
4a26dc9cb6d6e08e1f17afbcde876f2ea7c434f4 | 10,718 | py | Python | src/ipycbm/utils/help_docs.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | src/ipycbm/utils/help_docs.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | src/ipycbm/utils/help_docs.py | VP-GEO/cbm | 4ed229f6b6455435b6d032deb8a39dba4ecee7a2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HTML, HBox, VBox, Checkbox, Layout, widgets)
def widget_box():
wbox = VBox(children=[ipycbm_help(), about()])
return wbox
def ipycbm_help():
html = """
<H2>'Get' and 'View' functions.</H2>
With the 'get' function you can download data from the server to your local jupyter environment.<br>
The 'view' function is to load local files and display them with different methods, or provide example code for each selected dataset.<br>
<H4>Available options:</H4>
<b>Get data example:</b><br>
<code>import src.ipycbm</code><br>
<code>ipycbm.get()</code>
<br>
<b>View data example:</b><br>
<code>import src.ipycbm</code><br>
<code>ipycbm.view()</code><br>
<br>
'**tmp**' folder structure example for parcel with ID 12345:<br>
<code>tmp/
cat2019/parcel_12345/12345_information.json
cat2019/parcel_12345/12345_time_series.csv
cat2019/parcel_12345/12345_chipimages/12345_images_list.csv
cat2019/parcel_12345/12345_chipimages/S2A_MSIL2A_2019---.B04.tif
cat2019/parcel_12345/12345_chipimages/...</code>
"""
wbox = widgets.HTML(
value=html,
placeholder="Documantation",
description="")
return wbox
def about():
from src import __version__
html = f"""
<H1>About</H1>
<H3>JRC D5 Food security - GTCAP</H3>
<H4>DIAS for CAP Checks by Monitoring, development platforms and services.</H4>
Authors:<br>
Guido Lemoine<br>
Konstantinos Anastasakis<br>
<br>
Copyright 2021, Joint Research Centre (JRC) European Commission<br>
License: 3-Clause BSD , Version: {__version__}
"""
wbox = HTML(
value=html,
placeholder='About',
description='',
)
return wbox
def widget_box_foi():
wbox = VBox(children=[ipycbm_help_foi(), about()])
return wbox
def ipycbm_help_foi():
html = """
<H2>FOI Assessment: Heterogeneity and Cardinality</H2>
The FOI assessment notebook is based on the principle that inside of a homogenous FOI there should be only one type of pixels. In the same idea, a FOI which respects the 1-1 cardinalityshould not include clusters of pixels larger than a specified threshold (we can consider dispersed pixels different than the main class as “noise”).<br>
The FOI Assessment performs a spatial analysis on a "thematic raster" produced in advance. The thematic raster can be the result of any image/raster processing method yielding a class label for each pixel - crop classification, behavior analysis of land phenomenon, gridded data on soil, slope, humidity, etc.<br>
As an example, if the thematic raster is the result of a crop classification, a homogeneous FOI should have only one type of pixels that represent the respective crop, a cardinal FOI should not include any cluster of pixels from other class larger than a specified threshold.
If the thematic raster is the result of a behavior analysis, all the pixels inside an FOI should behave in the same way during a period of time.<br>
For both heterogeneity and cardinality, the notebook provides two methods for the analysis: one based area calculation (version 1) and one based on cluster size calculation (version 2). Both methods have similar results.
<br>
<H2>Version 1</H2>
The first version requires the connection to a database server (PostgreSQL with PostGIS extension)<br>
For the heterogeneity analysis the following steps are required (the steps correspond to the numbering on the interface):<br>
1. Connect to the database (at the moment only „Database connection settings” are required)<br>
a) Upload the reference data shapefile to the server. A graphical interface is provided for the upload.<br>
b) Import the uploaded shapefile into the database, specifying the name for the table that will be created in the database.<br>
2. Upload the raster „thematic” image. A graphical interface is provided. The accepted files are tif or tiff files. The thematic raster should be a one band raster file, with the pixel values representing the classes (like crop type or type of behaviour)<br>
3. Prepare FOI procedure – Allows the user to create the database functions on the database server. This procedure creates the necessary function and stored procedures on the database server.<br>
4. Select the required files for analysis:<br>
a) Vector file: the data on which the analysis will be applied. In case that we have more shapefiles uploaded on the server, this functionality allows us to select the one that we want to analyze.<br>
b) Thematic raster: the thematic raster provided. In case that we have more rasters uploaded on the server, this functionality allows us to select the one that we want to use on the analysis.<br>
c) YAML file that holds the classes from the thematic raster file: this file specifies the classes of pixels from the thematic raster and can also provide the meaning of those classes. It should have the following structure:<br>
<code>example.yml</code><br>
<code>category_map:
0: Unclasified
1: Class1
2: Class2
3: Class3
4: Class4
5: Class5
6: Class6
7: Class7
8: Class8
9: Class9
10: Class10</code><br>
Class1, Class2 can be replaced by the meaning of the class (like Wheat, Maize, etc., by a behavior name, or any other label).<br>
The YAML file should include all the classes that exist in the thematic raster. A graphical interface is provided for the upload.<br>
5. Analysis parameters:<br>
Heterogeneity thresholds: in order to exclude the influence of „noise” pixels, the user can specify the heterogeneity thresholds (for example, only the FOIs where one class of pixels has a percentage between 30 and 70 are considered heterogeneous).<br>
Minimum area for clusters selection: the user can specify the minimum area of the clusters that are considered a cardinality issue, in square meters. For example, clusters smaller than 2000 square meters can be considered as not influencing the FOI cardinality.<br>
6. Run FOI procedure.<br>
Starts the FOI analysis. The result of the analysis is represented by three shapefiles that are stored on the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v1.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
• total number of pixel for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v1.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered non-cardinal. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each of them bigger than the threshold (e.g. 2000 square meters), is considered as not respecting the 1-1 cardinality.<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_clusters_v1.shp</b> – represents only the clusters of pixels that are setting the FOI cardinality (for example, if an FOI includes three clusters of pixels bigger than the threshold, only those clusters will be saved in this shapefile)<br>
<H2>Version 2</H2>
The second version does not require a database server. All the calculations are made at pixel level using Python function.<br>
The interface and the steps are similar to the ones from the Version 1. The main difference is that it does not include the functionality for database connection and creating the functions on the database server.<br>
The different options available:<br>
Connectivity type: 8 or 4 connected pixels (4 indicating that diagonal pixels are not considered directly adjacent for polygon membership purposes or 8 indicating they are)<br>
Negative buffer: the user can apply a negative buffer on the FOI in order to reduce the influence of boundaries on the analysis (roads, adjacent FOIs, etc.)<br>
Cluster size (in pixels): the minimum number of pixels for which a cluster is taken into account.<br>
The result of the analysis is represented by two shapefiles that are stored on the “output_data” folder (/cbm/tmp/foi/output_data).<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foih_v2.shp</b> – represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_h – heterogeneity flag (0 for homogeneous FOIs and 1 for heterogeneous FOIs)<br>
• number of pixels for each class (the name of the attribute is the name of the class)<br>
• total number of pixel for the respective FOI<br>
• percentage of pixels from each class (number of pixels for each class / total number of pixels inside the FOI)<br>
<b>name of the shapefile dataset (without extension) that needs to be tested + foic_v2.shp</b> - represents the initial shapefile and during the analysis the following attributes are added:<br>
• foi_c – cardinality flag (0 for FOIs respecting the 1-1 cardinality and 1 for FOIs not respecting the 1-1 cardinality). As a result of this analysis, the FOIs that include more than one cluster of pixels from different classes bigger than the threshold are considered not respecting the 1-1 cardinality. For example, an FOI that includes two clusters of pixels from different classes (one arable land and one non-agricultural area), each of them bigger than the threshold (e.g. 20 pixels), is considered as not respecting the 1-1 cardinality.<br>
• Clusters – the information about the clusters of pixels identified inside the FOI, as pairs of pixel class and cluster size: for example (3, 25), (5, 120) means that inside the FOI we have identified two clusters: one of pixels from class 3 with a cluster size of 25 pixels, and another of pixels from class 5 with a cluster size of 120 pixels.<br>
"""
wbox = widgets.HTML(
value=html,
placeholder="Documentation",
description="")
return wbox
| 68.267516 | 551 | 0.764135 |
4a26dd29afca6397b575dc8afad7dac419ba1f61 | 4,880 | py | Python | feature_engine/discretisation/arbitrary.py | david-cortes/feature_engine | 702328d1a072d0911441e10b4eb98b3bfbf19215 | ["BSD-3-Clause"] | 1 | 2022-01-02T19:35:50.000Z | 2022-01-02T19:35:50.000Z | feature_engine/discretisation/arbitrary.py | david-cortes/feature_engine | 702328d1a072d0911441e10b4eb98b3bfbf19215 | ["BSD-3-Clause"] | null | null | null | feature_engine/discretisation/arbitrary.py | david-cortes/feature_engine | 702328d1a072d0911441e10b4eb98b3bfbf19215 | ["BSD-3-Clause"] | null | null | null |
# Authors: Soledad Galli <[email protected]>
# License: BSD 3 clause
from typing import Dict, List, Optional, Union
import pandas as pd
from feature_engine.base_transformers import BaseNumericalTransformer
from feature_engine.validation import _return_tags
class ArbitraryDiscretiser(BaseNumericalTransformer):
"""
The ArbitraryDiscretiser() divides numerical variables into intervals which limits
are determined by the user. Thus, it works only with numerical variables.
You need to enter a dictionary with variable names as keys, and a list with
the limits of the intervals as values. For example `{'var1':[0, 10, 100, 1000],
'var2':[5, 10, 15, 20]}`.
The ArbitraryDiscretiser() will then sort var1 values into the intervals 0-10,
10-100, 100-1000, and var2 into 5-10, 10-15 and 15-20. Similar to `pandas.cut`.
More details in the :ref:`User Guide <arbitrary_discretiser>`.
Parameters
----------
binning_dict: dict
The dictionary with the variable to interval limits pairs. A valid dictionary
looks like this:
`binning_dict = {'var1':[0, 10, 100, 1000], 'var2':[5, 10, 15, 20]}`
return_object: bool, default=False
Whether the discrete variable should be returned as numeric or as object.
If you would like to proceed with the engineering of the variable as if
it was categorical, use True. Alternatively, keep the default to False.
return_boundaries: bool, default=False
Whether the output, that is the bins, should be the interval boundaries. If
True, it returns the interval boundaries. If False, it returns integers.
Attributes
----------
binner_dict_:
Dictionary with the interval limits per variable.
variables_:
The variables that will be discretised.
n_features_in_:
The number of features in the train set used in fit.
Methods
-------
fit:
This transformer does not learn any parameter.
transform:
Sort variable values into the intervals.
fit_transform:
Fit to the data, then transform it.
See Also
--------
pandas.cut
"""
def __init__(
self,
binning_dict: Dict[Union[str, int], List[Union[str, int]]],
return_object: bool = False,
return_boundaries: bool = False,
) -> None:
if not isinstance(binning_dict, dict):
raise ValueError(
"Please provide at a dictionary with the interval limits per variable"
)
if not isinstance(return_object, bool):
raise ValueError("return_object must be True or False")
self.binning_dict = binning_dict
self.return_object = return_object
self.return_boundaries = return_boundaries
def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
"""
This transformer does not learn any parameter.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The training dataset. Can be the entire dataframe, not just the
variables to be transformed.
y: None
y is not needed in this transformer. You can pass y or None.
"""
# check input dataframe
X = super()._select_variables_from_dict(X, self.binning_dict)
# for consistency with the rest of the discretisers, we add this attribute
self.binner_dict_ = self.binning_dict
self.n_features_in_ = X.shape[1]
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""Sort the variable values into the intervals.
Parameters
----------
X: pandas dataframe of shape = [n_samples, n_features]
The dataframe to be transformed.
Returns
-------
X_new: pandas dataframe of shape = [n_samples, n_features]
The transformed data with the discrete variables.
"""
# check input dataframe and if class was fitted
X = super().transform(X)
# transform variables
if self.return_boundaries:
for feature in self.variables_:
X[feature] = pd.cut(X[feature], self.binner_dict_[feature])
else:
for feature in self.variables_:
X[feature] = pd.cut(
X[feature], self.binner_dict_[feature], labels=False
)
# return object
if self.return_object:
X[self.variables_] = X[self.variables_].astype("O")
return X
def _more_tags(self):
tags_dict = _return_tags()
# add additional test that fails
tags_dict["_xfail_checks"][
"check_parameters_default_constructible"
] = "transformer has 1 mandatory parameter"
return tags_dict
| 32.533333 | 86 | 0.634016 |
4a26dd4b13fb0c5a6bb3c1597b5a9baaf436bd2c | 4,836 | py | Python | framework/Runners/InternalRunner.py | alptezbasaran/raven | fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca | ["Apache-2.0"] | 1 | 2018-07-02T21:12:48.000Z | 2018-07-02T21:12:48.000Z | framework/Runners/InternalRunner.py | alptezbasaran/raven | fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca | ["Apache-2.0"] | null | null | null | framework/Runners/InternalRunner.py | alptezbasaran/raven | fd6fe8fe90b59d6dd3615cfea929722f3e04b2ca | ["Apache-2.0"] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 5, 2013
@author: alfoa, cogljj, crisr
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import os
import signal
import copy
import sys
import abc
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
from BaseClasses import BaseType
import MessageHandler
from .Runner import Runner
from .Error import Error
#Internal Modules End--------------------------------------------------------------------------------
class InternalRunner(Runner):
"""
Generic base Class for running internal objects
"""
def __init__(self, messageHandler, args, functionToRun, identifier=None, metadata=None, uniqueHandler = "any", profile = False):
"""
Init method
@ In, messageHandler, MessageHandler object, the global RAVEN message
handler object
@ In, args, dict, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun, method or function, function that needs to be run
@ In, identifier, string, optional, id of this job
@ In, metadata, dict, optional, dictionary of metadata associated with
this run
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner
@ In, profile, bool, optional, if True then during deconstruction timing
summaries will be printed.
@ Out, None
"""
## First, allow the base class to handle the commonalities
## We keep the command here, in order to have the hook for running exec
## code into internal models
super(InternalRunner, self).__init__(messageHandler, identifier, metadata, uniqueHandler, profile)
## Other parameters passed at initialization
self.args = copy.copy(args)
self.functionToRun = functionToRun
## Other parameters manipulated internally
self.thread = None
self.runReturn = None
self.hasBeenAdded = False
self.returnCode = 0
## These things cannot be deep copied
self.skipOnCopy = ['functionToRun','thread','__queueLock']
def __deepcopy__(self,memo):
"""
This is the method called with copy.deepcopy. Overwritten to remove some keys.
@ In, memo, dict, dictionary required by deepcopy method
@ Out, newobj, object, deep copy of this object
"""
cls = self.__class__
newobj = cls.__new__(cls)
memo[id(self)] = newobj
for k,v in self.__dict__.items():
if k not in self.skipOnCopy:
setattr(newobj,k,copy.deepcopy(v,memo))
return newobj
def _collectRunnerResponse(self):
"""
Method to add the process response in the internal variable (pointer)
self.runReturn
@ In, None
@ Out, None
"""
pass
def getReturnCode(self):
"""
Returns the return code from running the code.
@ In, None
@ Out, returnCode, int, the return code of this evaluation
"""
return self.returnCode
def getEvaluation(self):
"""
Method to return the results of the function evaluation associated with
this Runner
@ In, None
@ Out, returnValue, object or Error, whatever the method that this
instance is executing returns, or if the job failed, will return an
Error
"""
if self.isDone():
self._collectRunnerResponse()
if self.runReturn is None:
self.returnCode = -1
return Error()
return self.runReturn
else:
return Error()
| 37.2 | 130 | 0.627792 |
4a26dd8c2ae63cab2429ad5007aebe7d4ad952c2 | 1,865 | py | Python | src/other_scripts/split_sentences.py | saranya132/classinsight-language | c62d43337f190261bd00f0878f7364daebab3b1e | ["BSD-3-Clause"] | null | null | null | src/other_scripts/split_sentences.py | saranya132/classinsight-language | c62d43337f190261bd00f0878f7364daebab3b1e | ["BSD-3-Clause"] | null | null | null | src/other_scripts/split_sentences.py | saranya132/classinsight-language | c62d43337f190261bd00f0878f7364daebab3b1e | ["BSD-3-Clause"] | 1 | 2020-02-15T01:45:23.000Z | 2020-02-15T01:45:23.000Z |
'''
Created on Oct 14, 2019
@author: jzc1104
Splits paragraphs into sentences
'''
import csv,nltk,sys
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
if __name__ == '__main__':
if len(sys.argv)>1:
input_filename=sys.argv[1]
output_filename=sys.argv[2]
else:
#input_filename="transcripts/190423_Henry_Per2.csv"
#output_filename="transcripts/19023_Henry_Per2_split.csv"
input_filename="test.csv"
output_filename="testcsv_split.csv"
new_transcript_lines=[]
line_index=1
timestamp_tag=1
speaker_tag=0
transcript_tag=2
with open(input_filename,encoding="utf-8") as csv_file:
csvreader = csv.reader(csv_file, delimiter=",")
for line in csvreader:
if len(line)<4:
print("LESS THAN 4 COLUMNS:",line_index,line)
continue
if line[timestamp_tag]=="" and line[speaker_tag]=="" and line[transcript_tag]=="":continue
sents=nltk.tokenize.sent_tokenize(line[transcript_tag])
line[speaker_tag]=line[speaker_tag][:-1] #Speaker field always ends with ":", here we remove it
new_sent=[line_index, line[timestamp_tag],line[speaker_tag],sents[0]]
line_index+=1
new_transcript_lines.append(new_sent)
for sent in sents[1:]:
new_sent=[line_index,"","",sent]
line_index+=1
new_transcript_lines.append(new_sent)
headers=["Line","Time_Stamp","Speaker","Transcript"]
with open(output_filename, 'w', encoding = 'utf-8') as output_file:
writer = csv.writer(output_file, delimiter=",")
writer.writerow(headers)
writer.writerows(new_transcript_lines) | 30.080645 | 107 | 0.609115 |
4a26de8dd41e87aca3e8f26f847b7887ee30d46e | 9,269 | py | Python | mlpug/pytorch/trainers/training.py | nuhame/mlpug | be9f7c55f7d6616af5303e9350cfd8092d55440b | ["Apache-2.0"] | 4 | 2019-12-30T16:12:06.000Z | 2022-03-25T15:25:49.000Z | mlpug/pytorch/trainers/training.py | nuhame/mlpug | be9f7c55f7d6616af5303e9350cfd8092d55440b | ["Apache-2.0"] | null | null | null | mlpug/pytorch/trainers/training.py | nuhame/mlpug | be9f7c55f7d6616af5303e9350cfd8092d55440b | ["Apache-2.0"] | null | null | null |
import torch
from torch.cuda.amp import autocast
import torch.distributed as dist
from functools import reduce
import basics.base_utils as _
from mlpug.trainers.training import *
from mlpug.trainers.training import TrainingManager as TrainingManagerBase
from mlpug.mlpug_exceptions import TrainerInvalidException, BatchNotChunkableException, LossNotAvailableException
from mlpug.pytorch.utils import is_chunkable, SlidingWindow
from mlpug.pytorch.multi_processing import MultiProcessingMixin
class TrainingManager(MultiProcessingMixin, TrainingManagerBase):
def __init__(self, *args, sliding_window_factory=SlidingWindow, **kwargs):
super().__init__(*args, sliding_window_factory=sliding_window_factory, **kwargs)
def _training_ended(self):
if self.is_distributed:
# Wait for all processes to finish
dist.barrier()
class PTTrainerMixin(MultiProcessingMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _activate_inference_mode(self, inference_mode):
if inference_mode:
self.training_model.eval()
else:
self.training_model.train()
def _get_model_state(self, model, model_name=None):
return model.state_dict()
def _get_optimizer_state(self, optimizer, optimizer_name=None):
return optimizer.state_dict()
def _set_model_state(self, model, state, model_name=None):
model.load_state_dict(state)
def _set_optimizer_state(self, optimizer, state, optimizer_name):
optimizer.load_state_dict(state)
class Trainer(PTTrainerMixin, TrainerBase):
pass
class DefaultTrainer(PTTrainerMixin, DefaultTrainerBase):
def __init__(self, *args, scaler=None, name="DefaultTrainer", **kwargs):
super(DefaultTrainer, self).__init__(*args, name=name, **kwargs)
self._scaler = scaler
if self.use_mixed_precision:
if scaler is None:
self._log.debug("Creating default scaler instance for automatic mixed precision ...")
self._scaler = torch.cuda.amp.GradScaler()
self._log.info(f"Using scaler instance for automatic mixed precision : {self._scaler}")
def set_learning_rate_for(self, optimizer_name, lr):
"""
Set learning rate for specific optimizer `optimizer_name` to `lr`
:param optimizer_name:
:param lr:
:return: True on success, else False
"""
optimizer = self.get_optimizer(optimizer_name)
if not hasattr(optimizer, 'param_groups'):
self._log.error(f"No valid optimizer available with name {optimizer_name}, unable to set learning rate")
return False
try:
for group in optimizer.param_groups:
group['lr'] = lr
except Exception as e:
_.log_exception(self._log, f"Unable to set learning rate for optimizer {optimizer_name}", e)
return False
self._log.debug(f"Learning rate of optimizer {optimizer_name} set to : {lr}")
return True
def evaluate_loss(self, batch_data, inference_mode, evaluate_settings=None):
if self.use_mixed_precision:
self._activate_inference_mode(inference_mode)
with autocast():
results = self._evaluate_loss(batch_data, evaluate_settings, inference_mode)
return normalize_evaluation(results)
else:
return super().evaluate_loss(batch_data, inference_mode, evaluate_settings)
def train_on(self, batch_data, training_settings=None):
"""
Use batch_data to perform a training iteration.
Optionally uses `batch_chunk_size` to evaluate the loss in chunks.
If a `batch_chunk_size` was given during construction of the trainer, the gradients are updated by evaluating
the batch in chunks.
*Note*
When using chunked batch processing, the default implementation assumes that the
loss, calculated over a chunk, is the average of the sample losses.
:param batch_data: batch_data object to train on (e.g. dict, list, tuple)
When `batch_chunk_size` is given, `batch_data` must be an object that implements the
`__len__` and `__getitem__` methods. Here the `__getitem__` method must be able to deal
with slices.
:param training_settings: optional training_settings object (usually dict)
:return: loss, auxiliary_results
loss : number (e.g. float)
auxiliary_results : can be anything, e.g dict or list with values or data items
"""
if not self.instance_valid():
raise TrainerInvalidException()
self._reset_gradients()
loss, auxiliary_results = self._calc_gradients(batch_data, training_settings=training_settings)
self._prepare_update_model_parameters()
self._update_model_parameters()
self._after_update_model_parameters()
return loss, auxiliary_results
def _reset_gradients(self):
for optimizer in self.get_optimizers().values():
optimizer.zero_grad()
def _calc_gradients(self, batch_data, training_settings=None):
"""
:param batch_data:
:type batch_data:
:param training_settings:
:type training_settings:
:return:
:rtype:
:raises LossNotAvailableException
"""
if not self.batch_chunk_size:
results = self.evaluate_loss(batch_data,
inference_mode=False,
evaluate_settings=training_settings)
if 'loss' not in results:
raise LossNotAvailableException()
loss = results['loss']
auxiliary_results = get_value_at('auxiliary_results', results, warn_on_failure=False)
self._back_propagate_from(loss)
else:
chunk_losses, chunk_aux_results, chunk_lengths = self._calc_gradients_chunked(batch_data, training_settings)
loss, auxiliary_results = self._combine_chunk_results(chunk_losses, chunk_aux_results, chunk_lengths)
return loss, auxiliary_results
def _calc_gradients_chunked(self, batch_data, training_settings=None):
"""
See `train_on` method.
        This method splits the `batch_data` into slices of size `self.batch_chunk_size`. For each slice the loss is
        calculated and the gradients are updated through backpropagation.
return: chunk_losses, chunk_aux_results, chunk_lengths
All three outputs are lists
"""
if not is_chunkable(batch_data):
raise BatchNotChunkableException()
chunk_losses = []
chunk_aux_results = BatchChunkingResults()
chunk_lengths = []
batch_size = len(batch_data)
num_chunks = math.ceil(batch_size / self.batch_chunk_size)
for chunk_idx in range(num_chunks):
chunk_start = chunk_idx*self.batch_chunk_size
chunk_end = min((chunk_idx+1)*self.batch_chunk_size, batch_size)
chunk_len = chunk_end-chunk_start
chunk = batch_data[chunk_start:chunk_end]
results = self.evaluate_loss(chunk, inference_mode=False, evaluate_settings=training_settings)
if 'loss' not in results:
raise LossNotAvailableException()
loss = results['loss']
aux_results = get_value_at('auxiliary_results', results, warn_on_failure=False)
            # The chunk loss is assumed to be the average of the sample losses in the chunk.
            # Divide by the batch size to account for this chunk being part of the larger batch.
last_chunk = chunk_idx == (num_chunks-1)
self._back_propagate_from(chunk_len*loss/batch_size, last_chunk=last_chunk)
chunk_losses += [loss]
chunk_aux_results += [aux_results]
chunk_lengths += [chunk_len]
return chunk_losses, chunk_aux_results, chunk_lengths
def _combine_chunk_results(self, chunk_losses, chunk_aux_results, chunk_lengths):
"""
This default implementation assumes that the loss for a chunk is the average loss of all samples in the chunk.
There is no specific combination logic to combine the chunk auxiliary results
"""
loss = reduce(lambda tot, c: tot+(c[1]*c[0]), zip(chunk_losses, chunk_lengths), 0)
num_samples = reduce(lambda tot, l: tot+l, chunk_lengths, 0)
loss /= num_samples
return loss, chunk_aux_results
def _back_propagate_from(self, loss, last_chunk=False):
if self.use_mixed_precision:
self._scaler.scale(loss).backward()
else:
loss.backward()
def _prepare_update_model_parameters(self):
pass
def _update_model_parameters(self):
for optimizer in self.get_optimizers().values():
if self.use_mixed_precision:
self._scaler.step(optimizer)
else:
optimizer.step()
def _after_update_model_parameters(self):
if self.use_mixed_precision:
self._scaler.update()
| 35.51341 | 120 | 0.665012 |
4a26deae6f4741a708b08b3138844c9085fae5bf | 168 | py | Python | todo/urls.py | zhy0216-collection/django-todo | 9e91d5a0c800d461d5027300f76a0090769752b9 | [
"MIT"
] | null | null | null | todo/urls.py | zhy0216-collection/django-todo | 9e91d5a0c800d461d5027300f76a0090769752b9 | [
"MIT"
] | null | null | null | todo/urls.py | zhy0216-collection/django-todo | 9e91d5a0c800d461d5027300f76a0090769752b9 | [
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
from todo import views
urlpatterns = patterns('',
url(r'^$', views.TodoListView.as_view(), name="index")
)
| 8.4 | 58 | 0.654762 |
4a26dff8cc94b3371ec2f755637fb8a1597a16c9 | 3,232 | py | Python | voltha/adapters/loader.py | Balaji-P/voltha_docker_compose-rsk_tech_CKAD | 3cf9b2e2bbbaeed4744ff9f33a3e91e60c8020b7 | [
"Apache-2.0"
] | null | null | null | voltha/adapters/loader.py | Balaji-P/voltha_docker_compose-rsk_tech_CKAD | 3cf9b2e2bbbaeed4744ff9f33a3e91e60c8020b7 | [
"Apache-2.0"
] | null | null | null | voltha/adapters/loader.py | Balaji-P/voltha_docker_compose-rsk_tech_CKAD | 3cf9b2e2bbbaeed4744ff9f33a3e91e60c8020b7 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Loader to load each adapter.
In this initial simple implementation we scan all subdirs in this directory,
look for a python module with the same name as the subdir, and if the module
has a class that implements the IAdapterInterface, instantiate the class and
add it to plugins.
"""
import os
import structlog
from twisted.internet.defer import inlineCallbacks, returnValue
from zope.interface import implementer
from zope.interface.verify import verifyClass
from voltha.adapters.interface import IAdapterInterface
from voltha.core.adapter_agent import AdapterAgent
from voltha.protos import third_party
from voltha.registry import IComponent
log = structlog.get_logger()
mydir = os.path.abspath(os.path.dirname(__file__))
@implementer(IComponent)
class AdapterLoader(object):
def __init__(self, config):
self.config = config
self.adapter_agents = {} # adapter-name -> adapter instance
@inlineCallbacks
def start(self):
log.debug('starting')
for adapter_name, adapter_class in self._find_adapters():
agent = AdapterAgent(adapter_name, adapter_class)
yield agent.start()
self.adapter_agents[adapter_name] = agent
log.info('started')
returnValue(self)
@inlineCallbacks
def stop(self):
log.debug('stopping')
for proxy in self.adapter_agents.values():
yield proxy.stop()
self.adapter_agents = {}
log.info('stopped')
def get_agent(self, adapter_name):
return self.adapter_agents[adapter_name]
def _find_adapters(self):
subdirs = os.walk(mydir).next()[1]
try:
for subdir in subdirs:
adapter_name = subdir
py_file = os.path.join(mydir, subdir, subdir + '.py')
if os.path.isfile(py_file):
try:
package_name = __package__ + '.' + subdir
pkg = __import__(package_name, None, None, [adapter_name])
module = getattr(pkg, adapter_name)
except ImportError, e:
log.exception('cannot-load', file=py_file, e=e)
continue
for attr_name in dir(module):
cls = getattr(module, attr_name)
if isinstance(cls, type) and \
IAdapterInterface.implementedBy(cls):
verifyClass(IAdapterInterface, cls)
yield adapter_name, cls
except Exception, e:
log.exception('failed', e=e)
| 34.752688 | 82 | 0.64078 |
4a26e0d90a879e3c86e64765863bd46fbd36f9fb | 1,424 | py | Python | basic/dataset-processor.py | alexnguyen65/class6-homework | f3be0e5a6ec059f7ec56148fbd757f1eda2cef89 | [
"MIT"
] | null | null | null | basic/dataset-processor.py | alexnguyen65/class6-homework | f3be0e5a6ec059f7ec56148fbd757f1eda2cef89 | [
"MIT"
] | null | null | null | basic/dataset-processor.py | alexnguyen65/class6-homework | f3be0e5a6ec059f7ec56148fbd757f1eda2cef89 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
file_path = 'housing.data'
plots_dir = 'plots'
plots_format = 'png'
# housing.data has no header row: read with header=None (and a raw string for the regex separator), then assign names below
housing_df = pd.read_csv(file_path, sep=r'\s+', header=None)
housing_df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'BLACK', 'LSTAT', 'MEDV']
os.makedirs(plots_dir, exist_ok=True)
for feature1_idx, feature1_name in enumerate(housing_df.columns):
for feature2_idx, feature2_name in enumerate(housing_df.columns):
for feature3_idx, feature3_name in enumerate(housing_df.columns):
if (feature1_idx < feature2_idx) and (feature2_idx < feature3_idx):
print ('Generating 3d scatter plot with ' + feature1_name + ' + ' + feature2_name + ' + ' + feature3_name)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(housing_df[feature1_name], housing_df[feature2_name], housing_df[feature3_name])
ax.set_title('3D scatter plot')
ax.set_xlabel(feature1_name)
ax.set_ylabel(feature2_name)
ax.set_zlabel(feature3_name)
# ax.legend()
plots_file = plots_dir + '/3d_scatter_' + feature1_name + '_' + feature2_name + '_' + feature3_name + '.' + plots_format
plt.savefig(plots_file, format=plots_format)
plt.clf()
plt.close()
| 43.151515 | 133 | 0.653792 |
4a26e2843a1475a1faecfd3c0760d04cd29f8a64 | 24,082 | py | Python | tools/generate-wire.py | mosqueiro/lightning | 793a25a0e5ee0c3c80d1403d556a4a013ee233fd | [
"MIT"
] | null | null | null | tools/generate-wire.py | mosqueiro/lightning | 793a25a0e5ee0c3c80d1403d556a4a013ee233fd | [
"MIT"
] | null | null | null | tools/generate-wire.py | mosqueiro/lightning | 793a25a0e5ee0c3c80d1403d556a4a013ee233fd | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# Script to parse spec output CSVs and produce C files.
# Released by lisa neigut under CC0:
# https://creativecommons.org/publicdomain/zero/1.0/
#
# Reads from stdin, outputs C header or body file.
#
# Standard message types:
# msgtype,<msgname>,<value>[,<option>]
# msgdata,<msgname>,<fieldname>,<typename>,[<count>][,<option>]
#
# TLV types:
# tlvtype,<tlvstreamname>,<tlvname>,<value>[,<option>]
# tlvdata,<tlvstreamname>,<tlvname>,<fieldname>,<typename>,[<count>][,<option>]
#
# Subtypes:
# subtype,<subtypename>
# subtypedata,<subtypename>,<fieldname>,<typename>,[<count>]
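#
# Illustrative input lines (hypothetical values, shown only to clarify the CSV
# layouts listed above):
#   msgtype,init,16
#   msgdata,init,gflen,u16,
#   msgdata,init,globalfeatures,byte,gflen
#   subtype,example_subtype
#   subtypedata,example_subtype,some_field,u32,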
from argparse import ArgumentParser, REMAINDER
from collections import OrderedDict
import copy
import fileinput
from mako.template import Template
import os
import re
import sys
# Generator to give us one line at a time.
def next_line(args, lines):
if lines is None:
lines = fileinput.input(args)
for i, line in enumerate(lines):
yield i + 1, line.strip()
# Class definitions, to keep things classy
class Field(object):
def __init__(self, name, type_obj, extensions=[],
field_comments=[], optional=False):
self.name = name
self.type_obj = type_obj
self.count = 1
self.len_field_of = None
self.len_field = None
self.implicit_len = False
self.extension_names = extensions
self.is_optional = optional
self.field_comments = field_comments
def __deepcopy__(self, memo):
deepcopy_method = self.__deepcopy__
self.__deepcopy__ = None
field = copy.deepcopy(self, memo)
self.__deepcopy__ = deepcopy_method
field.type_obj = self.type_obj
return field
def add_count(self, count):
self.count = int(count)
def add_len_field(self, len_field):
self.count = False
# we cache our len-field's name
self.len_field = len_field.name
# the len-field caches our name
len_field.len_field_of = self.name
def add_implicit_len(self):
self.count = False
self.implicit_len = True
def is_array(self):
return self.count > 1
def is_varlen(self):
return not self.count
def is_implicit_len(self):
return self.implicit_len
def is_extension(self):
return bool(self.extension_names)
def size(self, implicit_expression=None):
if self.count:
return self.count
if self.len_field:
return self.len_field
assert self.is_implicit_len()
assert implicit_expression
return implicit_expression
def needs_context(self):
""" A field needs a context if it's varsized """
return self.is_varlen() or self.type_obj.needs_context()
def arg_desc_to(self):
if self.len_field_of:
return ''
type_name = self.type_obj.type_name()
if self.is_array():
return ', const {} {}[{}]'.format(type_name, self.name, self.count)
if self.type_obj.is_assignable() and not self.is_varlen():
name = self.name
if self.is_optional:
name = '*' + name
return ', {} {}'.format(type_name, name)
if self.is_varlen() and self.type_obj.is_varsize():
return ', const {} **{}'.format(type_name, self.name)
return ', const {} *{}'.format(type_name, self.name)
def arg_desc_from(self):
type_name = self.type_obj.type_name()
if self.type_obj.is_const_ptr_ptr_type():
return ', const {} **{}'.format(type_name, self.name)
if self.len_field_of:
return ''
if self.is_array():
return ', {} {}[{}]'.format(type_name, self.name, self.count)
ptrs = '*'
if self.is_varlen() or self.is_optional or self.type_obj.is_varsize():
ptrs += '*'
if self.is_varlen() and self.type_obj.is_varsize():
ptrs += '*'
return ', {} {}{}'.format(type_name, ptrs, self.name)
class FieldSet(object):
def __init__(self):
self.fields = OrderedDict()
self.len_fields = {}
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False,
implicit_len_ok=False):
field = Field(field_name, type_obj, extensions=extensions,
field_comments=comments, optional=optional)
if bool(count):
try:
field.add_count(int(count))
except ValueError:
if count in self.fields:
len_field = self.find_data_field(count)
field.add_len_field(len_field)
self.len_fields[len_field.name] = len_field
else:
# '...' means "rest of TLV"
assert implicit_len_ok
assert count == '...'
field.add_implicit_len()
# You can't have any fields after an implicit-length field.
if len(self.fields) != 0:
assert not self.fields[next(reversed(self.fields))].is_implicit_len()
self.fields[field_name] = field
def find_data_field(self, field_name):
return self.fields[field_name]
def get_len_fields(self):
return list(self.len_fields.values())
def has_len_fields(self):
return bool(self.len_fields)
def needs_context(self):
return any([field.needs_context() or field.is_optional for field in self.fields.values()])
class Type(FieldSet):
assignables = [
'u8',
'u16',
'u32',
'u64',
'tu16',
'tu32',
'tu64',
'bool',
'amount_sat',
'amount_msat',
'bigsize',
'varint'
]
typedefs = [
'u8',
'u16',
'u32',
'u64',
'bool',
'secp256k1_ecdsa_signature',
'secp256k1_ecdsa_recoverable_signature',
'wirestring',
'double',
'bigsize',
'varint',
]
truncated_typedefs = [
'tu16',
'tu32',
'tu64',
]
# Externally defined variable size types (require a context)
varsize_types = [
'peer_features',
'gossip_getnodes_entry',
'gossip_getchannels_entry',
'failed_htlc',
'utxo',
'zcore_tx',
'wirestring',
'per_peer_state',
'zcore_tx_output',
'exclude_entry',
]
# Some BOLT types are re-typed based on their field name
# ('fieldname partial', 'original type', 'outer type'): ('true type', 'collapse array?')
name_field_map = {
('txid', 'sha256'): ('zcore_txid', False),
('amt', 'u64'): ('amount_msat', False),
('msat', 'u64'): ('amount_msat', False),
('satoshis', 'u64'): ('amount_sat', False),
('node_id', 'pubkey', 'channel_announcement'): ('node_id', False),
('node_id', 'pubkey', 'node_announcement'): ('node_id', False),
('temporary_channel_id', 'u8'): ('channel_id', True),
('secret', 'u8'): ('secret', True),
('preimage', 'u8'): ('preimage', True),
}
# For BOLT specified types, a few type names need to be simply 'remapped'
# 'original type': 'true type'
name_remap = {
'byte': 'u8',
'signature': 'secp256k1_ecdsa_signature',
'chain_hash': 'zcore_blkid',
'point': 'pubkey',
# FIXME: omits 'pad'
}
# Types that are const pointer-to-pointers, such as chainparams, i.e.,
# they set a reference to some const entry.
const_ptr_ptr_types = [
'chainparams'
]
@staticmethod
def true_type(type_name, field_name=None, outer_name=None):
""" Returns 'true' type of a given type and a flag if
we've remapped a variable size/array type to a single struct
(an example of this is 'temporary_channel_id' which is specified
as a 32*byte, but we re-map it to a channel_id
"""
if type_name in Type.name_remap:
type_name = Type.name_remap[type_name]
if field_name:
for t, true_type in Type.name_field_map.items():
if t[0] in field_name and t[1] == type_name:
if len(t) == 2 or outer_name == t[2]:
return true_type
return (type_name, False)
def __init__(self, name):
FieldSet.__init__(self)
self.name, self.is_enum = self.parse_name(name)
self.depends_on = {}
self.type_comments = []
self.tlv = False
def parse_name(self, name):
if name.startswith('enum '):
return name[5:], True
return name, False
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False):
FieldSet.add_data_field(self, field_name, type_obj, count,
extensions=extensions,
comments=comments, optional=optional)
if type_obj.name not in self.depends_on:
self.depends_on[type_obj.name] = type_obj
def type_name(self):
if self.name in self.typedefs:
return self.name
if self.name in self.truncated_typedefs:
return self.name[1:]
if self.is_enum:
prefix = 'enum '
else:
prefix = 'struct '
return prefix + self.struct_name()
# We only accelerate the u8 case: it's common and trivial.
def has_array_helper(self):
return self.name in ['u8']
def struct_name(self):
if self.is_tlv():
return self.tlv.struct_name()
return self.name
def subtype_deps(self):
return [dep for dep in self.depends_on.values() if dep.is_subtype()]
def is_subtype(self):
return bool(self.fields)
def is_const_ptr_ptr_type(self):
return self.name in self.const_ptr_ptr_types
def is_truncated(self):
return self.name in self.truncated_typedefs
def needs_context(self):
return self.is_varsize()
def is_assignable(self):
""" Generally typedef's and enums """
return self.name in self.assignables or self.is_enum
def is_varsize(self):
""" A type is variably sized if it's marked as such (in varsize_types)
or it contains a field of variable length """
return self.name in self.varsize_types or self.has_len_fields()
def add_comments(self, comments):
self.type_comments = comments
def mark_tlv(self, tlv):
self.tlv = tlv
def is_tlv(self):
return bool(self.tlv)
class Message(FieldSet):
def __init__(self, name, number, option=[], enum_prefix='wire',
struct_prefix=None, comments=[]):
FieldSet.__init__(self)
self.name = name
self.number = number
self.enum_prefix = enum_prefix
self.option = option[0] if len(option) else None
self.struct_prefix = struct_prefix
self.enumname = None
self.msg_comments = comments
self.if_token = None
def has_option(self):
return self.option is not None
def enum_name(self):
name = self.enumname if self.enumname else self.name
return "{}_{}".format(self.enum_prefix, name).upper()
def struct_name(self):
if self.struct_prefix:
return self.struct_prefix + "_" + self.name
return self.name
def add_if(self, if_token):
self.if_token = if_token
class Tlv(object):
def __init__(self, name):
self.name = name
self.messages = {}
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option]) """
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
enum_prefix=self.name,
struct_prefix=self.struct_name(),
comments=comments)
def type_name(self):
return 'struct ' + self.struct_name()
def struct_name(self):
return "tlv_{}".format(self.name)
def find_message(self, name):
return self.messages[name]
def ordered_msgs(self):
return sorted(self.messages.values(), key=lambda item: int(item.number))
class Master(object):
types = {}
tlvs = {}
messages = {}
extension_msgs = {}
inclusions = []
top_comments = []
def add_comments(self, comments):
self.top_comments += comments
def add_include(self, inclusion):
self.inclusions.append(inclusion)
def add_tlv(self, tlv_name):
if tlv_name not in self.tlvs:
self.tlvs[tlv_name] = Tlv(tlv_name)
if tlv_name not in self.types:
self.types[tlv_name] = Type(tlv_name)
return self.tlvs[tlv_name]
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option])"""
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
comments=comments)
def add_extension_msg(self, name, msg):
self.extension_msgs[name] = msg
def add_type(self, type_name, field_name=None, outer_name=None):
optional = False
if type_name.startswith('?'):
type_name = type_name[1:]
optional = True
# Check for special type name re-mapping
type_name, collapse_original = Type.true_type(type_name, field_name,
outer_name)
if type_name not in self.types:
self.types[type_name] = Type(type_name)
return self.types[type_name], collapse_original, optional
def find_type(self, type_name):
return self.types[type_name]
def find_message(self, msg_name):
if msg_name in self.messages:
return self.messages[msg_name]
if msg_name in self.extension_msgs:
return self.extension_msgs[msg_name]
return None
def find_tlv(self, tlv_name):
return self.tlvs[tlv_name]
def get_ordered_subtypes(self):
""" We want to order subtypes such that the 'no dependency'
types are printed first """
subtypes = [s for s in self.types.values() if s.is_subtype()]
# Start with subtypes without subtype dependencies
sorted_types = [s for s in subtypes if not len(s.subtype_deps())]
unsorted = [s for s in subtypes if len(s.subtype_deps())]
while len(unsorted):
names = [s.name for s in sorted_types]
for s in list(unsorted):
if all([dependency.name in names for dependency in s.subtype_deps()]):
sorted_types.append(s)
unsorted.remove(s)
return sorted_types
def tlv_messages(self):
return [m for tlv in self.tlvs.values() for m in tlv.messages.values()]
def find_template(self, options):
dirpath = os.path.dirname(os.path.abspath(__file__))
filename = dirpath + '/gen/{}{}_template'.format(
'print_' if options.print_wire else '', options.page)
return Template(filename=filename)
def post_process(self):
""" method to handle any 'post processing' that needs to be done.
for now, we just need match up types to TLVs """
for tlv_name, tlv in self.tlvs.items():
if tlv_name in self.types:
self.types[tlv_name].mark_tlv(tlv)
def write(self, options, output):
template = self.find_template(options)
enum_sets = []
enum_sets.append({
'name': options.enum_name,
'set': self.messages.values(),
})
stuff = {}
stuff['top_comments'] = self.top_comments
stuff['options'] = options
stuff['idem'] = re.sub(r'[^A-Z]+', '_', options.header_filename.upper())
stuff['header_filename'] = options.header_filename
stuff['includes'] = self.inclusions
stuff['enum_sets'] = enum_sets
subtypes = self.get_ordered_subtypes()
stuff['structs'] = subtypes + self.tlv_messages()
stuff['tlvs'] = self.tlvs
# We leave out extension messages in the printing pages. Any extension
# fields will get printed under the 'original' message, if present
if options.print_wire:
stuff['messages'] = list(self.messages.values())
else:
stuff['messages'] = list(self.messages.values()) + list(self.extension_msgs.values())
stuff['subtypes'] = subtypes
print(template.render(**stuff), file=output)
def main(options, args=None, output=sys.stdout, lines=None):
genline = next_line(args, lines)
comment_set = []
token_name = None
# Create a new 'master' that serves as the coordinator for the file generation
master = Master()
try:
while True:
ln, line = next(genline)
tokens = line.split(',')
token_type = tokens[0]
if not bool(line):
master.add_comments(comment_set)
comment_set = []
token_name = None
continue
if len(tokens) > 2:
token_name = tokens[1]
if token_type == 'subtype':
subtype, _, _ = master.add_type(tokens[1])
subtype.add_comments(list(comment_set))
comment_set = []
elif token_type == 'subtypedata':
subtype = master.find_type(tokens[1])
if not subtype:
raise ValueError('Unknown subtype {} for data.\nat {}:{}'
.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if optional:
raise ValueError('Subtypes cannot have optional fields {}.{}\n at {}:{}'
.format(subtype.name, tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[4]
subtype.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type == 'tlvtype':
tlv = master.add_tlv(tokens[1])
tlv.add_message(tokens[2:], comments=list(comment_set))
comment_set = []
elif token_type == 'tlvdata':
type_obj, collapse, optional = master.add_type(tokens[4], tokens[3], tokens[1])
if optional:
raise ValueError('TLV messages cannot have optional fields {}.{}\n at {}:{}'
.format(tokens[2], tokens[3], ln, line))
tlv = master.find_tlv(tokens[1])
if not tlv:
raise ValueError('tlvdata for unknown tlv {}.\nat {}:{}'
.format(tokens[1], ln, line))
msg = tlv.find_message(tokens[2])
if not msg:
raise ValueError('tlvdata for unknown tlv-message {}.\nat {}:{}'
.format(tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[5]
msg.add_data_field(tokens[3], type_obj, count, comments=list(comment_set),
optional=optional, implicit_len_ok=True)
comment_set = []
elif token_type == 'msgtype':
master.add_message(tokens[1:], comments=list(comment_set))
comment_set = []
elif token_type == 'msgdata':
msg = master.find_message(tokens[1])
if not msg:
raise ValueError('Unknown message type {}. {}:{}'.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if collapse:
count = 1
elif len(tokens) < 5:
raise ValueError('problem with parsing {}:{}'.format(ln, line))
else:
count = tokens[4]
# if this is an 'extension' field*, we want to add a new 'message' type
# in the future, extensions will be handled as TLV's
#
# *(in the spec they're called 'optional', but that term is overloaded
# in that internal wire messages have 'optional' fields that are treated
# differently. for the sake of clarity here, for bolt-wire messages,
# we'll refer to 'optional' message fields as 'extensions')
#
if tokens[5:] == []:
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
else: # is one or more extension fields
if optional:
raise ValueError("Extension fields cannot be optional. {}:{}"
.format(ln, line))
orig_msg = msg
for extension in tokens[5:]:
extension_name = "{}_{}".format(tokens[1], extension)
msg = master.find_message(extension_name)
if not msg:
msg = copy.deepcopy(orig_msg)
msg.enumname = msg.name
msg.name = extension_name
master.add_extension_msg(msg.name, msg)
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set), optional=optional)
# If this is a print_wire page, add the extension fields to the
# original message, so we can print them if present.
if options.print_wire:
orig_msg.add_data_field(tokens[2], type_obj, count=count,
extensions=tokens[5:],
comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type.startswith('#include'):
master.add_include(token_type)
elif token_type.startswith('#if'):
msg = master.find_message(token_name)
if (msg):
if_token = token_type[token_type.index(' ') + 1:]
msg.add_if(if_token)
elif token_type.startswith('#'):
comment_set.append(token_type[1:])
else:
raise ValueError("Unknown token type {} on line {}:{}".format(token_type, ln, line))
except StopIteration:
pass
master.post_process()
master.write(options, output)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--expose-subtypes", help="print subtypes in header",
action="store_true", default=False)
parser.add_argument("-P", "--print_wire", help="generate wire printing source files",
action="store_true", default=False)
parser.add_argument("--page", choices=['header', 'impl'], help="page to print")
parser.add_argument('--expose-tlv-type', action='append', default=[])
parser.add_argument('header_filename', help='The filename of the header')
parser.add_argument('enum_name', help='The name of the enum to produce')
parser.add_argument("files", help='Files to read in (or stdin)', nargs=REMAINDER)
parsed_args = parser.parse_args()
main(parsed_args, parsed_args.files)
| 35.57164 | 117 | 0.561623 |
4a26e2b6a7e1d1a6f49fa031a3b6d151494fe821 | 9,521 | py | Python | docs/conf.py | eblume/flask-dance | 87d45328bbdaff833559a6d3da71461fe4579592 | [
"MIT"
] | null | null | null | docs/conf.py | eblume/flask-dance | 87d45328bbdaff833559a6d3da71461fe4579592 | [
"MIT"
] | null | null | null | docs/conf.py | eblume/flask-dance | 87d45328bbdaff833559a6d3da71461fe4579592 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Flask Dance documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 09:47:52 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
from flask_dance import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinxcontrib.seqdiag",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Flask Dance"
copyright = u"2014-2019, David Baumgold"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "flask"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"index_logo": "flask-dance.png",
"github_fork": "singingwolfboy/flask-dance",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "FlaskDancedoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"FlaskDance.tex",
u"Flask Dance Documentation",
u"David Baumgold",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/flask-dance.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "flaskdance", u"Flask Dance Documentation", [u"David Baumgold"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"FlaskDance",
u"Flask Dance Documentation",
u"David Baumgold",
"FlaskDance",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"flask": ("http://flask.pocoo.org/docs/", None),
"flask_login": ("https://flask-login.readthedocs.io/en/latest/", None),
"werkzeug": ("http://werkzeug.pocoo.org/docs/", None),
"requests": ("http://docs.python-requests.org/en/latest/", None),
"requests_oauthlib": ("https://requests-oauthlib.readthedocs.io/en/latest/", None),
"sqlalchemy": ("https://docs.sqlalchemy.org/en/latest/", None),
"sqlalchemy_utils": ("https://sqlalchemy-utils.readthedocs.io/en/latest/", None),
"betamax": ("https://betamax.readthedocs.io/en/latest", None),
}
autodoc_member_order = "bysource"
seqdiag_antialias = True
seqdiag_fontpath = [
"/usr/share/fonts/Arial Unicode.ttf",
"/usr/share/fonts/Arial.ttf",
"/Library/Fonts/Arial Unicode.ttf",
"/Library/Fonts/Arial.ttf",
]
| 31.842809 | 87 | 0.701607 |
4a26e3da81c291c52c46ff283399c5af6bffbbad | 15,849 | py | Python | backend/src/baserow/contrib/database/api/fields/views.py | lyhiving/baserow2 | 757148947a77e1469ad63540628730b637134cc2 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/api/fields/views.py | lyhiving/baserow2 | 757148947a77e1469ad63540628730b637134cc2 | [
"MIT"
] | null | null | null | backend/src/baserow/contrib/database/api/fields/views.py | lyhiving/baserow2 | 757148947a77e1469ad63540628730b637134cc2 | [
"MIT"
] | null | null | null | from django.db import transaction
from drf_spectacular.openapi import OpenApiParameter, OpenApiTypes
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import permission_classes as method_permission_classes
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from baserow.api.decorators import validate_body_custom_fields, map_exceptions
from baserow.api.errors import ERROR_USER_NOT_IN_GROUP
from baserow.api.schemas import get_error_schema
from baserow.api.trash.errors import ERROR_CANNOT_DELETE_ALREADY_DELETED_ITEM
from baserow.api.utils import DiscriminatorCustomFieldsMappingSerializer
from baserow.api.utils import validate_data_custom_fields, type_from_data_or_registry
from baserow.contrib.database.api.fields.errors import (
ERROR_CANNOT_DELETE_PRIMARY_FIELD,
ERROR_CANNOT_CHANGE_FIELD_TYPE,
ERROR_FIELD_DOES_NOT_EXIST,
ERROR_MAX_FIELD_COUNT_EXCEEDED,
ERROR_RESERVED_BASEROW_FIELD_NAME,
ERROR_FIELD_WITH_SAME_NAME_ALREADY_EXISTS,
ERROR_INVALID_BASEROW_FIELD_NAME,
)
from baserow.contrib.database.api.tables.errors import ERROR_TABLE_DOES_NOT_EXIST
from baserow.contrib.database.api.tokens.authentications import TokenAuthentication
from baserow.contrib.database.api.tokens.errors import ERROR_NO_PERMISSION_TO_TABLE
from baserow.contrib.database.fields.exceptions import (
CannotDeletePrimaryField,
CannotChangeFieldType,
FieldDoesNotExist,
MaxFieldLimitExceeded,
ReservedBaserowFieldNameException,
FieldWithSameNameAlreadyExists,
InvalidBaserowFieldName,
)
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import Field
from baserow.contrib.database.fields.registries import field_type_registry
from baserow.contrib.database.table.exceptions import TableDoesNotExist
from baserow.contrib.database.table.handler import TableHandler
from baserow.contrib.database.tokens.exceptions import NoPermissionToTable
from baserow.contrib.database.tokens.handler import TokenHandler
from baserow.core.exceptions import UserNotInGroup
from baserow.core.trash.exceptions import CannotDeleteAlreadyDeletedItem
from .serializers import (
FieldSerializer,
CreateFieldSerializer,
UpdateFieldSerializer,
FieldSerializerWithRelatedFields,
RelatedFieldsSerializer,
)
class FieldsView(APIView):
authentication_classes = APIView.authentication_classes + [TokenAuthentication]
permission_classes = (IsAuthenticated,)
def get_permissions(self):
if self.request.method == "GET":
return [AllowAny()]
return super().get_permissions()
@extend_schema(
parameters=[
OpenApiParameter(
name="table_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Returns only the fields of the table related to the "
"provided value.",
)
],
tags=["Database table fields"],
operation_id="list_database_table_fields",
description=(
"Lists all the fields of the table related to the provided parameter if "
"the user has access to the related database's group. If the group is "
"related to a template, then this endpoint will be publicly accessible. A "
"table consists of fields and each field can have a different type. Each "
"type can have different properties. A field is comparable with a regular "
"table's column."
),
responses={
200: DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, FieldSerializer, many=True
),
400: get_error_schema(["ERROR_USER_NOT_IN_GROUP"]),
401: get_error_schema(["ERROR_NO_PERMISSION_TO_TABLE"]),
404: get_error_schema(["ERROR_TABLE_DOES_NOT_EXIST"]),
},
)
@map_exceptions(
{
TableDoesNotExist: ERROR_TABLE_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
NoPermissionToTable: ERROR_NO_PERMISSION_TO_TABLE,
}
)
@method_permission_classes([AllowAny])
def get(self, request, table_id):
"""
Responds with a list of serialized fields that belong to the table if the user
has access to that group.
"""
table = TableHandler().get_table(table_id)
table.database.group.has_user(
request.user, raise_error=True, allow_if_template=True
)
TokenHandler().check_table_permissions(
request, ["read", "create", "update"], table, False
)
fields = Field.objects.filter(table=table).select_related("content_type")
data = [
field_type_registry.get_serializer(field, FieldSerializer).data
for field in fields
]
return Response(data)
@extend_schema(
parameters=[
OpenApiParameter(
name="table_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Creates a new field for the provided table related to the "
"value.",
)
],
tags=["Database table fields"],
operation_id="create_database_table_field",
description=(
"Creates a new field for the table related to the provided `table_id` "
"parameter if the authorized user has access to the related database's "
"group. Depending on the type, different properties can optionally be "
"set."
"If creating the field causes other fields to change then the specific"
"instances of those fields will be included in the related fields "
"response key."
),
request=DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, CreateFieldSerializer
),
responses={
200: DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, FieldSerializerWithRelatedFields
),
400: get_error_schema(
[
"ERROR_USER_NOT_IN_GROUP",
"ERROR_REQUEST_BODY_VALIDATION",
"ERROR_MAX_FIELD_COUNT_EXCEEDED",
"ERROR_RESERVED_BASEROW_FIELD_NAME",
"ERROR_FIELD_WITH_SAME_NAME_ALREADY_EXISTS",
"ERROR_INVALID_BASEROW_FIELD_NAME",
]
),
401: get_error_schema(["ERROR_NO_PERMISSION_TO_TABLE"]),
404: get_error_schema(["ERROR_TABLE_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@validate_body_custom_fields(
field_type_registry, base_serializer_class=CreateFieldSerializer
)
@map_exceptions(
{
TableDoesNotExist: ERROR_TABLE_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
MaxFieldLimitExceeded: ERROR_MAX_FIELD_COUNT_EXCEEDED,
NoPermissionToTable: ERROR_NO_PERMISSION_TO_TABLE,
FieldWithSameNameAlreadyExists: ERROR_FIELD_WITH_SAME_NAME_ALREADY_EXISTS,
ReservedBaserowFieldNameException: ERROR_RESERVED_BASEROW_FIELD_NAME,
InvalidBaserowFieldName: ERROR_INVALID_BASEROW_FIELD_NAME,
}
)
def post(self, request, data, table_id):
"""Creates a new field for a table."""
type_name = data.pop("type")
field_type = field_type_registry.get(type_name)
table = TableHandler().get_table(table_id)
table.database.group.has_user(request.user, raise_error=True)
        # field_create permission doesn't exist, so any call of this endpoint with a
# token will be rejected.
TokenHandler().check_table_permissions(request, "field_create", table, False)
# Because each field type can raise custom exceptions while creating the
# field we need to be able to map those to the correct API exceptions which are
# defined in the type.
with field_type.map_api_exceptions():
field, updated_fields = FieldHandler().create_field(
request.user, table, type_name, return_updated_fields=True, **data
)
serializer = field_type_registry.get_serializer(
field, FieldSerializerWithRelatedFields, related_fields=updated_fields
)
return Response(serializer.data)
class FieldView(APIView):
permission_classes = (IsAuthenticated,)
@extend_schema(
parameters=[
OpenApiParameter(
name="field_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Returns the field related to the provided value.",
)
],
tags=["Database table fields"],
operation_id="get_database_table_field",
description=(
"Returns the existing field if the authorized user has access to the "
"related database's group. Depending on the type different properties "
"could be returned."
),
responses={
200: DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, FieldSerializer
),
400: get_error_schema(["ERROR_USER_NOT_IN_GROUP"]),
404: get_error_schema(["ERROR_FIELD_DOES_NOT_EXIST"]),
},
)
@map_exceptions(
{
FieldDoesNotExist: ERROR_FIELD_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
}
)
def get(self, request, field_id):
"""Selects a single field and responds with a serialized version."""
field = FieldHandler().get_field(field_id)
field.table.database.group.has_user(request.user, raise_error=True)
serializer = field_type_registry.get_serializer(field, FieldSerializer)
return Response(serializer.data)
@extend_schema(
parameters=[
OpenApiParameter(
name="field_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Updates the field related to the provided value.",
)
],
tags=["Database table fields"],
operation_id="update_database_table_field",
description=(
"Updates the existing field if the authorized user has access to the "
"related database's group. The type can also be changed and depending on "
"that type, different additional properties can optionally be set. If you "
"change the field type it could happen that the data conversion fails, in "
"that case the `ERROR_CANNOT_CHANGE_FIELD_TYPE` is returned, but this "
"rarely happens. If a data value cannot be converted it is set to `null` "
"so data might go lost."
"If updated the field causes other fields to change then the specific"
"instances of those fields will be included in the related fields "
"response key."
),
request=DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, UpdateFieldSerializer
),
responses={
200: DiscriminatorCustomFieldsMappingSerializer(
field_type_registry, FieldSerializerWithRelatedFields
),
400: get_error_schema(
[
"ERROR_USER_NOT_IN_GROUP",
"ERROR_CANNOT_CHANGE_FIELD_TYPE",
"ERROR_REQUEST_BODY_VALIDATION",
"ERROR_RESERVED_BASEROW_FIELD_NAME",
"ERROR_FIELD_WITH_SAME_NAME_ALREADY_EXISTS",
"ERROR_INVALID_BASEROW_FIELD_NAME",
]
),
404: get_error_schema(["ERROR_FIELD_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@map_exceptions(
{
FieldDoesNotExist: ERROR_FIELD_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
CannotChangeFieldType: ERROR_CANNOT_CHANGE_FIELD_TYPE,
FieldWithSameNameAlreadyExists: ERROR_FIELD_WITH_SAME_NAME_ALREADY_EXISTS,
ReservedBaserowFieldNameException: ERROR_RESERVED_BASEROW_FIELD_NAME,
InvalidBaserowFieldName: ERROR_INVALID_BASEROW_FIELD_NAME,
}
)
def patch(self, request, field_id):
"""Updates the field if the user belongs to the group."""
field = (
FieldHandler()
.get_field(field_id, base_queryset=Field.objects.select_for_update())
.specific
)
type_name = type_from_data_or_registry(request.data, field_type_registry, field)
field_type = field_type_registry.get(type_name)
data = validate_data_custom_fields(
type_name,
field_type_registry,
request.data,
base_serializer_class=UpdateFieldSerializer,
)
        # Because each field type can raise custom exceptions while updating the
# field we need to be able to map those to the correct API exceptions which are
# defined in the type.
with field_type.map_api_exceptions():
field, related_fields = FieldHandler().update_field(
request.user, field, type_name, return_updated_fields=True, **data
)
serializer = field_type_registry.get_serializer(
field, FieldSerializerWithRelatedFields, related_fields=related_fields
)
return Response(serializer.data)
@extend_schema(
parameters=[
OpenApiParameter(
name="field_id",
location=OpenApiParameter.PATH,
type=OpenApiTypes.INT,
description="Deletes the field related to the provided value.",
)
],
tags=["Database table fields"],
operation_id="delete_database_table_field",
description=(
"Deletes the existing field if the authorized user has access to the "
"related database's group. Note that all the related data to that field "
"is also deleted. Primary fields cannot be deleted because their value "
"represents the row. "
"If deleting the field causes other fields to change then the specific"
"instances of those fields will be included in the related fields "
"response key."
),
responses={
200: RelatedFieldsSerializer,
400: get_error_schema(
[
"ERROR_USER_NOT_IN_GROUP",
"ERROR_CANNOT_DELETE_PRIMARY_FIELD",
"ERROR_CANNOT_DELETE_ALREADY_DELETED_ITEM",
]
),
404: get_error_schema(["ERROR_FIELD_DOES_NOT_EXIST"]),
},
)
@transaction.atomic
@map_exceptions(
{
FieldDoesNotExist: ERROR_FIELD_DOES_NOT_EXIST,
UserNotInGroup: ERROR_USER_NOT_IN_GROUP,
CannotDeletePrimaryField: ERROR_CANNOT_DELETE_PRIMARY_FIELD,
CannotDeleteAlreadyDeletedItem: ERROR_CANNOT_DELETE_ALREADY_DELETED_ITEM,
}
)
def delete(self, request, field_id):
"""Deletes an existing field if the user belongs to the group."""
field = FieldHandler().get_field(field_id)
field_type = field_type_registry.get_by_model(field.specific_class)
with field_type.map_api_exceptions():
updated_fields = FieldHandler().delete_field(request.user, field)
return Response(RelatedFieldsSerializer({}, related_fields=updated_fields).data)
| 41.489529 | 88 | 0.659663 |
4a26e511262b460ad96cde480ec6f8e4c5eb6abc | 450 | py | Python | src/board.py | zoltax/tanks | 995cd9af260e5dd1d4485cfb95c80ce7f7c358ef | [
"MIT"
] | null | null | null | src/board.py | zoltax/tanks | 995cd9af260e5dd1d4485cfb95c80ce7f7c358ef | [
"MIT"
] | null | null | null | src/board.py | zoltax/tanks | 995cd9af260e5dd1d4485cfb95c80ce7f7c358ef | [
"MIT"
] | null | null | null | __author__ = 'filipkulig'
class Board():
# curses.LINES and curses.COLS
lines = None
cols = None
def __init__(self):
pass
def get(self):
return "plansza"
def update(self):
pass
def get_cols(self):
return self.cols
def get_lines(self):
return self.lines
def set_cols(self,cols):
self.cols = cols
def set_lines(self,lines):
self.lines = lines
| 13.636364 | 34 | 0.568889 |
4a26e55fe7fcbe5d0a9245c2c4457fc211a62299 | 991 | py | Python | KAUSIM_py/multirotor/drone/event_manager.py | RETELLIGENCE-IWEN/KAU_SIM_PY | b922fa361d0ee20b37752470c25370089a7690e8 | [
"MIT"
] | null | null | null | KAUSIM_py/multirotor/drone/event_manager.py | RETELLIGENCE-IWEN/KAU_SIM_PY | b922fa361d0ee20b37752470c25370089a7690e8 | [
"MIT"
] | null | null | null | KAUSIM_py/multirotor/drone/event_manager.py | RETELLIGENCE-IWEN/KAU_SIM_PY | b922fa361d0ee20b37752470c25370089a7690e8 | [
"MIT"
] | null | null | null | from typing import Callable, Dict, List, Any
class EventManager:
def __init__(self):
self.subsciptions: Dict[Any, List[Callable]] = {}
def register(self, event_name: Any):
"""새로운 event를 추가"""
if event_name in self.subsciptions:
raise ValueError(f"{event_name} is already registered")
self.subsciptions[event_name] = []
def subscribe(self, event_name: Any, function: Callable):
"""
        Add function so that it runs when the event event_name occurs.
"""
if event_name not in self.subsciptions:
raise KeyError(f"{event_name} is not registered")
self.subsciptions[event_name].append(function)
def publish(self, event_name: Any, *data):
"""
        When the event event_name occurs,
        call the subscribed functions with data as their arguments.
"""
if event_name not in self.subsciptions:
raise KeyError(f"{event_name} is not registered")
for f in self.subsciptions[event_name]:
f(*data)
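# Minimal usage sketch (event name and handler below are hypothetical):
#   events = EventManager()
#   events.register("collision")
#   events.subscribe("collision", lambda drone_id, force: print(drone_id, force))
#   events.publish("collision", "drone_1", 9.81)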
| 30.030303 | 67 | 0.61554 |
4a26e6915066addb7c742321a16686127617861a | 2,892 | py | Python | google/appengine/ext/mapreduce/main.py | prashanth9962/gaepatch-1.6 | fbc19e3ecb1eb0709b6a92157d005d14e9b146f0 | [
"Apache-2.0"
] | 1 | 2020-10-13T19:53:04.000Z | 2020-10-13T19:53:04.000Z | google/appengine/ext/mapreduce/main.py | prashanth9962/gaepatch-1.6 | fbc19e3ecb1eb0709b6a92157d005d14e9b146f0 | [
"Apache-2.0"
] | null | null | null | google/appengine/ext/mapreduce/main.py | prashanth9962/gaepatch-1.6 | fbc19e3ecb1eb0709b6a92157d005d14e9b146f0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Main module for map-reduce implementation.
This module should be specified as a handler for mapreduce URLs in app.yaml:
handlers:
- url: /mapreduce(/.*)?
login: admin
script: mapreduce/main.py
"""
import wsgiref.handlers
import google
from google.appengine.ext import webapp
from google.appengine.ext.mapreduce import handlers
from google.appengine.ext.mapreduce import status
from google.appengine.ext.webapp import util
try:
from appengine_pipeline.src import pipeline
except ImportError:
pipeline = None
STATIC_RE = r".*/([^/]*\.(?:css|js)|status|detail)$"
class RedirectHandler(webapp.RequestHandler):
"""Redirects the user back to the status page."""
def get(self):
new_path = self.request.path
if not new_path.endswith("/"):
new_path += "/"
new_path += "status"
self.redirect(new_path)
def create_handlers_map():
"""Create new handlers map.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
"""
pipeline_handlers_map = []
if pipeline:
pipeline_handlers_map = pipeline.create_handlers_map(prefix=".*/pipeline")
return pipeline_handlers_map + [
(r".*/worker_callback", handlers.MapperWorkerCallbackHandler),
(r".*/controller_callback", handlers.ControllerCallbackHandler),
(r".*/kickoffjob_callback", handlers.KickOffJobHandler),
(r".*/finalizejob_callback", handlers.FinalizeJobHandler),
(r".*/command/start_job", handlers.StartJobHandler),
(r".*/command/cleanup_job", handlers.CleanUpJobHandler),
(r".*/command/abort_job", handlers.AbortJobHandler),
(r".*/command/list_configs", status.ListConfigsHandler),
(r".*/command/list_jobs", status.ListJobsHandler),
(r".*/command/get_job_detail", status.GetJobDetailHandler),
(STATIC_RE, status.ResourceHandler),
(r".*", RedirectHandler),
]
def create_application():
"""Create new WSGIApplication and register all handlers.
Returns:
an instance of webapp.WSGIApplication with all mapreduce handlers
registered.
"""
return webapp.WSGIApplication(create_handlers_map(),
debug=True)
APP = create_application()
def main():
util.run_wsgi_app(APP)
if __name__ == "__main__":
main()
| 22.952381 | 78 | 0.707469 |
4a26e6d51404a885e2e99157e3450afa88c5170e | 954 | py | Python | tfx_addons/utils/test_utils.py | digits/tfx-addons | df5dbe307a50988f31a6477775348213dd5d1886 | [
"Apache-2.0"
] | null | null | null | tfx_addons/utils/test_utils.py | digits/tfx-addons | df5dbe307a50988f31a6477775348213dd5d1886 | [
"Apache-2.0"
] | null | null | null | tfx_addons/utils/test_utils.py | digits/tfx-addons | df5dbe307a50988f31a6477775348213dd5d1886 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Util functions to assist with the TFX Addons tests """
from typing import Tuple
def get_tfx_version(version: str) -> Tuple[int, ...]:
"""
Returns the TFX version as integers.
"""
return tuple([int(x) for x in version.split(".")]) # pylint: disable=R1728
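# Illustrative check (added for documentation; the version strings below are assumed inputs).
# get_tfx_version("1.12.0") -> (1, 12, 0); the tuples compare element-wise, so callers
# can test against a minimum TFX version.
if __name__ == "__main__":
    assert get_tfx_version("1.12.0") == (1, 12, 0)
    assert get_tfx_version("0.30.1") < (1, 0, 0)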
| 38.16 | 80 | 0.675052 |
4a26e8ce5513995046825932396735897fb8739f | 1,415 | py | Python | setup.py | waltdisgrace/dbus-signature-pyparsing | fa705f1a7932dc5c5cb7f83858d0d7006fce6495 | [
"Apache-2.0"
] | null | null | null | setup.py | waltdisgrace/dbus-signature-pyparsing | fa705f1a7932dc5c5cb7f83858d0d7006fce6495 | [
"Apache-2.0"
] | null | null | null | setup.py | waltdisgrace/dbus-signature-pyparsing | fa705f1a7932dc5c5cb7f83858d0d7006fce6495 | [
"Apache-2.0"
] | null | null | null | # isort: STDLIB
import os
import sys
# isort: THIRDPARTY
import setuptools
if sys.version_info[0] < 3:
from codecs import open
def local_file(name):
return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
README = local_file("README.rst")
with open(local_file("src/dbus_signature_pyparsing/_version.py")) as o:
exec(o.read())
setuptools.setup(
name="dbus-signature-pyparsing",
version=__version__,
author="Anne Mulhern",
author_email="[email protected]",
url="https://github.com/stratis-storage/dbus-signature-pyparsing",
description="dbus signature parser",
long_description=open(README, encoding="utf-8").read(),
platforms=["Linux"],
license="Apache 2.0",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=["pyparsing"],
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
)
| 30.106383 | 73 | 0.660777 |
4a26e9383de0cdad501dcf4238b94ea2491309b0 | 10,217 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20190201/vpn_site.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20190201/vpn_site.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20190201/vpn_site.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VpnSite']
class VpnSite(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['address_space'] = address_space
__props__['bgp_properties'] = bgp_properties
__props__['device_properties'] = device_properties
__props__['id'] = id
__props__['ip_address'] = ip_address
__props__['is_security_site'] = is_security_site
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['site_key'] = site_key
__props__['tags'] = tags
__props__['virtual_wan'] = virtual_wan
__props__['vpn_site_name'] = vpn_site_name
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/latest:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnSite, __self__).__init__(
'azure-nextgen:network/v20190201:VpnSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VpnSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
"""
The device properties
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
"""
IsSecuritySite flag
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> pulumi.Output[Optional[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualWAN to which the vpnSite belongs
"""
return pulumi.get(self, "virtual_wan")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.611607 | 1,426 | 0.65577 |
4a26e95cb8b1f932c5c25a228d16051e52dc88c3 | 264 | py | Python | .heroku/chrome-smoke-test.py | SalesforceFoundation/MetaCI | b80f880672d4f11594fc48cbda0d340311fa35e0 | [
"BSD-3-Clause"
] | 15 | 2017-11-06T05:26:15.000Z | 2018-10-12T03:15:37.000Z | .heroku/chrome-smoke-test.py | SalesforceFoundation/MetaCI | b80f880672d4f11594fc48cbda0d340311fa35e0 | [
"BSD-3-Clause"
] | 66 | 2017-10-31T21:26:30.000Z | 2018-10-19T15:55:27.000Z | .heroku/chrome-smoke-test.py | SalesforceFoundation/MetaCI | b80f880672d4f11594fc48cbda0d340311fa35e0 | [
"BSD-3-Clause"
] | 7 | 2018-01-07T11:30:47.000Z | 2018-10-11T11:51:58.000Z | import selenium
import selenium.webdriver
import sys
options = selenium.webdriver.chrome.options.Options()
options.headless = True
try:
selenium.webdriver.Chrome(options=options)
except Exception as e:
print(f"Unable to open chrome:\n{e}")
sys.exit(1)
| 24 | 53 | 0.761364 |
4a26ec7cdb3678976d307e8f884c85693463ff7a | 3,749 | py | Python | travis_pypi_setup.py | ulkaio/ulka | d33d8a695082cafcd5be808f7a745b6066931560 | [
"MIT"
] | null | null | null | travis_pypi_setup.py | ulkaio/ulka | d33d8a695082cafcd5be808f7a745b6066931560 | [
"MIT"
] | 425 | 2016-09-26T21:27:56.000Z | 2022-03-28T10:27:18.000Z | travis_pypi_setup.py | ulkaio/ulka | d33d8a695082cafcd5be808f7a745b6066931560 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'ulkaio/ulka'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| 30.479675 | 79 | 0.69992 |
4a26ec94eccfae57805a691a89571e7d5237ec32 | 3,585 | py | Python | tests/integration/sugar/test_wallet.py | sawdog/xrpl-py | ae6eac89d55a8db6b26a0189749b8c4b83d00d88 | [
"ISC"
] | null | null | null | tests/integration/sugar/test_wallet.py | sawdog/xrpl-py | ae6eac89d55a8db6b26a0189749b8c4b83d00d88 | [
"ISC"
] | null | null | null | tests/integration/sugar/test_wallet.py | sawdog/xrpl-py | ae6eac89d55a8db6b26a0189749b8c4b83d00d88 | [
"ISC"
] | 1 | 2022-02-21T07:36:36.000Z | 2022-02-21T07:36:36.000Z | from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.it_utils import submit_transaction_async, test_async_and_sync
from tests.integration.reusable_values import WALLET
from xrpl.asyncio.clients import AsyncJsonRpcClient, AsyncWebsocketClient
from xrpl.asyncio.wallet import generate_faucet_wallet
from xrpl.clients import JsonRpcClient, WebsocketClient
from xrpl.core.addresscodec import classic_address_to_xaddress
from xrpl.models.requests import AccountInfo
from xrpl.models.transactions import Payment
from xrpl.wallet import generate_faucet_wallet as sync_generate_faucet_wallet
class TestWallet(IntegrationTestCase):
@test_async_and_sync(
globals(),
["xrpl.wallet.generate_faucet_wallet"],
num_retries=5,
use_testnet=True,
)
async def test_generate_faucet_wallet_rel_sub(self, client):
destination = await generate_faucet_wallet(client)
wallet = await generate_faucet_wallet(client)
response = await submit_transaction_async(
Payment(
account=wallet.classic_address,
sequence=wallet.sequence,
fee="10",
amount="1",
destination=destination.classic_address,
),
wallet,
client=client,
)
self.assertTrue(response.is_successful())
async def test_generate_faucet_wallet_custom_host_async_websockets(self):
async with AsyncWebsocketClient(
"wss://xls20-sandbox.rippletest.net:51233"
) as client:
wallet = await generate_faucet_wallet(
client, faucet_host="faucet-nft.ripple.com"
)
result = await client.request(
AccountInfo(
account=wallet.classic_address,
),
)
self.assertTrue(int(result.result["account_data"]["Balance"]) > 0)
async def test_generate_faucet_wallet_custom_host_async_json_rpc(self):
client = AsyncJsonRpcClient("http://xls20-sandbox.rippletest.net:51234")
wallet = await generate_faucet_wallet(
client, faucet_host="faucet-nft.ripple.com"
)
result = await client.request(
AccountInfo(
account=wallet.classic_address,
),
)
self.assertTrue(int(result.result["account_data"]["Balance"]) > 0)
def test_generate_faucet_wallet_custom_host_sync_websockets(self):
with WebsocketClient("wss://xls20-sandbox.rippletest.net:51233") as client:
wallet = sync_generate_faucet_wallet(
client, faucet_host="faucet-nft.ripple.com"
)
result = client.request(
AccountInfo(
account=wallet.classic_address,
),
)
self.assertTrue(int(result.result["account_data"]["Balance"]) > 0)
def test_generate_faucet_wallet_custom_host_sync_json_rpc(self):
client = JsonRpcClient("http://xls20-sandbox.rippletest.net:51234")
wallet = sync_generate_faucet_wallet(
client, faucet_host="faucet-nft.ripple.com"
)
result = client.request(
AccountInfo(
account=wallet.classic_address,
),
)
self.assertTrue(int(result.result["account_data"]["Balance"]) > 0)
def test_wallet_get_xaddress(self):
expected = classic_address_to_xaddress(WALLET.classic_address, None, False)
self.assertEqual(WALLET.get_xaddress(), expected)
| 40.280899 | 84 | 0.656904 |
4a26ecef26d01fa381d384c1986d4b889f43be90 | 7,805 | py | Python | models/half_model.py | EliotChenKJ/CMTE | e3a7be59ea4eba09f37c20b9e19e64386e74818e | [
"BSD-3-Clause"
] | null | null | null | models/half_model.py | EliotChenKJ/CMTE | e3a7be59ea4eba09f37c20b9e19e64386e74818e | [
"BSD-3-Clause"
] | null | null | null | models/half_model.py | EliotChenKJ/CMTE | e3a7be59ea4eba09f37c20b9e19e64386e74818e | [
"BSD-3-Clause"
] | null | null | null | import torch
from .base_model import BaseModel
from . import networks
from .vgg import VGG, GramMatrix, GramMSELoss
import os
class halfModel(BaseModel):
"""
This is HalfModel for image texture extension.
    The model training requires a dataset created with '--dataset_mode half'.
    It trains a half-GAN model that maps a k x k input image to a 2k x 2k output image.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""
Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(netG='resnet_2x_6blocks', netD='n_layers', n_layers_D=4,
gan_mode='vanilla', pool_size=0, display_ncols=3,
niter=50000, niter_decay=50000, save_epoch_freq=10000, display_freq=3200, print_freq=320)
if is_train:
# parser.add_argument('--use_style', type=bool, default=True, help='use style loss')
parser.add_argument('--lambda_L1', type=float, default=100, help='l1 loss lambda')
parser.add_argument('--lambda_style', type=float, default=1e3, help='style loss lambda')
else:
parser.add_argument('--test_size', type=int, default=256, help='tested sample\'s size')
return parser
def __init__(self, opt):
"""
Initialize half model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the training losses. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
self.loss_names = ['G_GAN', 'G_L1', 'D_fake', 'D_real', 'Style']
# specify the important images. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = ['real_A', 'real_B', 'fake_B']
# specify the models. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
if self.isTrain:
self.model_names = ['G', 'D']
else:
self.model_names = ['G']
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type,
opt.init_gain, self.gpu_ids)
if self.isTrain: # if istrain, get netD
self.netD = networks.define_D(opt.input_nc, opt.ndf,
opt.netD, opt.n_layers_D, 1, opt.norm,
opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # only defined during training time
# define loss functions.
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # GANLoss
self.criterionL1 = torch.nn.L1Loss() # L1Loss between fake_B and real_B
# losses of feature map
self.style_layers = ['r11', 'r21', 'r31', 'r41', 'r51']
self.style_weights = [self.opt.lambda_style / (t * t) for t in [64, 128, 256, 512, 512]]
self.criterionStyle = [GramMSELoss().to(self.device)] * len(self.style_layers)
self.vgg = VGG()
self.vgg.load_state_dict(torch.load(os.getcwd() + '/models/' + 'vgg_conv.pth'))
self.set_requires_grad(self.vgg, False)
self.vgg = self.vgg.to(self.device)
# define and initialize optimizers. You can define one optimizer for each network.
# If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer_G, self.optimizer_D]
# show the network structure
networks.print_network_structure(self.netG)
if self.isTrain:
networks.print_network_structure(self.netD)
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.real_A = input['A'].to(self.device) # get image data A
self.real_B = input['B'].to(self.device) # get image data B
self.image_paths = input['A_paths'] # get image paths
def forward(self):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
self.fake_B = self.netG(self.real_A) # generate output image given the input data_A
def backward_D(self):
"""Calculate GAN loss for discriminator"""
# calculate loss given the input and intermediate results
pred_fake = self.netD(self.fake_B.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
pred_real = self.netD(self.real_B)
self.loss_D_real = self.criterionGAN(pred_real, True)
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward() # calculate gradients of network D w.r.t. loss_D
def backward_G(self):
"""Calculate GAN loss for generator"""
# calculate loss given the gan generator
# first, calculate the gan generator's loss
pred_fake = self.netD(self.fake_B)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# second, calculate the G l1 loss
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# then, calculate the style loss
style_out = self.vgg(self.fake_B, self.style_layers)
style_targets = [GramMatrix()(layer_out).detach() for layer_out in self.vgg(self.real_B, self.style_layers)]
temp = [self.criterionStyle[i](i_out, style_targets[i]) * self.style_weights[i] for i, i_out in enumerate(style_out)]
self.loss_Style = sum(temp)
# calculate the all loss
self.loss_G = self.loss_G_GAN + self.loss_G_L1 + self.loss_Style
# finally, get the gradients for generators' parameters
self.loss_G.backward()
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
# update D
self.set_requires_grad(self.netD, True)
self.optimizer_D.zero_grad() # clear network G's existing gradients
self.backward_D() # calculate gradients for network G
self.optimizer_D.step() # update gradients for network G
# update G
self.set_requires_grad(self.netD, False)
self.optimizer_G.zero_grad() # clear network G's existing gradients
self.backward_G() # calculate gradients for network G
self.optimizer_G.step()
| 54.201389 | 151 | 0.641127 |
4a26ed42f86572071abccaf4d585abd6c3827680 | 1,740 | py | Python | app/main.py | AlpacaMax/Shadow-Browser | 65c95eb2712a2764384b851c8bc128702ea911d1 | [
"MIT"
] | 1 | 2018-07-26T13:54:44.000Z | 2018-07-26T13:54:44.000Z | app/main.py | AlpacaMax/Shadow-Browser | 65c95eb2712a2764384b851c8bc128702ea911d1 | [
"MIT"
] | null | null | null | app/main.py | AlpacaMax/Shadow-Browser | 65c95eb2712a2764384b851c8bc128702ea911d1 | [
"MIT"
] | null | null | null | from flask import *
import requests
import os
app = Flask(__name__)
SECRET = os.environ.get("SECRET")
HOSTNAME = os.environ.get("HOSTNAME")
def validate_token(token, hashes, secret):
api_url = "https://api.coinhive.com/token/verify"
data = {
'token': token,
'hashes': hashes,
'secret': secret
}
r = requests.post(api_url, data)
return r.text.find('true') > 0
def get_page(url):
return requests.get(url).text
@app.route("/")
def index():
host = HOSTNAME
info = get_page("http://docker_api/browsers_info")
info_list = info.split("#")
avail = info_list[0]
inuse = info_list[1]
per_avail = info_list[2]
per_inuse = info_list[3]
return render_template('index.html', hostname=host,
available=avail,
inuse=inuse,
per_available=per_avail,
per_inuse=per_inuse)
@app.route("/get_browser/<token>")
def get_browser(token):
if validate_token(str(token), 256, SECRET):
out = get_page("http://docker_api/run_chrome?duration=" + str(300))
out_list = out.split("#")
hostname = out_list[0]
port = out_list[1]
password = out_list[2]
return render_template('launch.html', hostname=hostname, port=port, password=password)
else:
return "Invalid token! Please play fair!"
@app.route("/helloworld")
def hello():
return get_page('http://docker_api/')
if __name__ == "__main__":
# Only for debugging while developing
app.run(host='0.0.0.0', debug=True, port=80)
| 26.363636 | 95 | 0.564368 |
4a26ee1c27e7102d8d0416365cee7dc5730c6e5a | 7,377 | py | Python | adafruit_bno08x/spi.py | jposada202020/Adafruit_CircuitPython_BNO08x | d96dc951a5f9aca6e2ea8ee4ebbc708cb3002cf4 | [
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null | adafruit_bno08x/spi.py | jposada202020/Adafruit_CircuitPython_BNO08x | d96dc951a5f9aca6e2ea8ee4ebbc708cb3002cf4 | [
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null | adafruit_bno08x/spi.py | jposada202020/Adafruit_CircuitPython_BNO08x | d96dc951a5f9aca6e2ea8ee4ebbc708cb3002cf4 | [
"MIT",
"MIT-0",
"Unlicense"
] | null | null | null | # SPDX-FileCopyrightText: Copyright (c) 2020 Bryan Siepert for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Subclass of `adafruit_bno08x.BNO08X` to use SPI
================================================
"""
import time
from struct import pack_into
from digitalio import Direction, Pull
import adafruit_bus_device.spi_device as spi_device
from . import BNO08X, DATA_BUFFER_SIZE, _elapsed, Packet, PacketError
class BNO08X_SPI(BNO08X):
"""
Instantiate a `adafruit_bno08x.spi.BNO08X_SPI` instance to communicate with
the sensor using SPI
:param ~busio.SPI spi_bus: The SPI bus to use to communicate with the BNO08x
:param ~digitalio.DigitalInOut cspin: The pin object to use for the SPI Chip Select
    :param ~digitalio.DigitalInOut intpin: The pin object connected to the sensor's interrupt line
:param ~digitalio.DigitalInOut resetpin: Required for SPI mode. Connected to the
RST pin on the device, and used to hard-reset the device.
:param int baudrate: baudrate of the SPI bus. Defaults to :const:`1000000`
:param bool debug: Enables print statements used for debugging. Defaults to `False`
**Quickstart: Importing and using the device**
Here is an example of using the :class:`BNO08X_SPI` class.
First you will need to import the libraries to use the sensor
.. code-block:: python
import board
from adafruit_bno08x.spi import BNO08X_SPI
Once this is done you can define your `board.SPI` object and define your sensor object
.. code-block:: python
spi = board.SPI()
bno = BNO08X_SPI(spi)
    For this particular sensor you need to enable the desired reports before you can read data.
.. code-block:: python
bno.enable_feature(adafruit_bno08x.BNO_REPORT_ACCELEROMETER)
bno.enable_feature(adafruit_bno08x.BNO_REPORT_GYROSCOPE)
bno.enable_feature(adafruit_bno08x.BNO_REPORT_MAGNETOMETER)
bno.enable_feature(adafruit_bno08x.BNO_REPORT_ROTATION_VECTOR)
Now you have access to the :attr:`acceleration`, :attr:`gyro`
:attr:`magnetic` and :attr:`quaternion` attributes
.. code-block:: python
accel_x, accel_y, accel_z = bno.acceleration
gyro_x, gyro_y, gyro_z = bno.gyro
mag_x, mag_y, mag_z = bno.magnetic
quat_i, quat_j, quat_k, quat_real = bno.quaternion
"""
def __init__(
self, spi_bus, cspin, intpin, resetpin, baudrate=1000000, debug=False
): # pylint:disable=too-many-arguments
self._spi = spi_device.SPIDevice(
spi_bus, cspin, baudrate=baudrate, polarity=1, phase=1
)
self._int = intpin
super().__init__(resetpin, debug)
def hard_reset(self):
"""Hardware reset the sensor to an initial unconfigured state"""
self._reset.direction = Direction.OUTPUT
self._int.direction = Direction.INPUT
self._int.pull = Pull.UP
print("Hard resetting...")
self._reset.value = True # perform hardware reset
time.sleep(0.01)
self._reset.value = False
time.sleep(0.01)
self._reset.value = True
self._wait_for_int()
print("Done!")
self._read_packet()
def _wait_for_int(self):
# print("Waiting for INT...", end="")
start_time = time.monotonic()
while _elapsed(start_time) < 3.0:
if not self._int.value:
break
else:
self.hard_reset()
# raise RuntimeError("Could not wake up")
# print("OK")
def soft_reset(self):
"""Reset the sensor to an initial unconfigured state"""
# print("Soft resetting...", end="")
# data = bytearray(1)
# data[0] = 1
# _seq = self._send_packet(BNO_CHANNEL_EXE, data)
# time.sleep(0.5)
for _i in range(3):
try:
_packet = self._read_packet()
except PacketError:
time.sleep(0.1)
# print("OK!")
# all is good!
def _read_into(self, buf, start=0, end=None):
self._wait_for_int()
with self._spi as spi:
spi.readinto(buf, start=start, end=end, write_value=0x00)
# print("SPI Read buffer (", end-start, "b )", [hex(i) for i in buf[start:end]])
def _read_header(self):
"""Reads the first 4 bytes available as a header"""
self._wait_for_int()
# read header
with self._spi as spi:
spi.readinto(self._data_buffer, end=4, write_value=0x00)
self._dbg("")
self._dbg("SHTP READ packet header: ", [hex(x) for x in self._data_buffer[0:4]])
def _read_packet(self):
self._read_header()
halfpacket = False
print([hex(x) for x in self._data_buffer[0:4]])
if self._data_buffer[1] & 0x80:
halfpacket = True
header = Packet.header_from_buffer(self._data_buffer)
packet_byte_count = header.packet_byte_count
channel_number = header.channel_number
sequence_number = header.sequence_number
self._sequence_number[channel_number] = sequence_number
if packet_byte_count == 0:
raise PacketError("No packet available")
self._dbg(
"channel %d has %d bytes available"
% (channel_number, packet_byte_count - 4)
)
if packet_byte_count > DATA_BUFFER_SIZE:
self._data_buffer = bytearray(packet_byte_count)
# re-read header bytes since this is going to be a new transaction
self._read_into(self._data_buffer, start=0, end=packet_byte_count)
# print("Packet: ", [hex(i) for i in self._data_buffer[0:packet_byte_count]])
if halfpacket:
raise PacketError("read partial packet")
new_packet = Packet(self._data_buffer)
if self._debug:
print(new_packet)
self._update_sequence_number(new_packet)
return new_packet
def _read(self, requested_read_length):
self._dbg("trying to read", requested_read_length, "bytes")
unread_bytes = 0
# +4 for the header
total_read_length = requested_read_length + 4
if total_read_length > DATA_BUFFER_SIZE:
unread_bytes = total_read_length - DATA_BUFFER_SIZE
total_read_length = DATA_BUFFER_SIZE
with self._spi as spi:
spi.readinto(self._data_buffer, end=total_read_length)
return unread_bytes > 0
def _send_packet(self, channel, data):
data_length = len(data)
write_length = data_length + 4
pack_into("<H", self._data_buffer, 0, write_length)
self._data_buffer[2] = channel
self._data_buffer[3] = self._sequence_number[channel]
for idx, send_byte in enumerate(data):
self._data_buffer[4 + idx] = send_byte
self._wait_for_int()
with self._spi as spi:
spi.write(self._data_buffer, end=write_length)
self._dbg("Sending: ", [hex(x) for x in self._data_buffer[0:write_length]])
self._sequence_number[channel] = (self._sequence_number[channel] + 1) % 256
return self._sequence_number[channel]
@property
def _data_ready(self):
try:
self._wait_for_int()
return True
except RuntimeError:
return False
| 35.296651 | 94 | 0.63088 |
4a26eec2e8d3ecbe57b5eab87ad9f527ea61f1ff | 127 | py | Python | is_even/_typings.py | spookyahell/PyIsEven | da0fe5a181c77ef84e858a2da5c04f66b7916591 | [
"MIT"
] | 45 | 2021-04-02T22:19:49.000Z | 2022-02-01T15:38:33.000Z | is_even/_typings.py | spookyahell/PyIsEven | da0fe5a181c77ef84e858a2da5c04f66b7916591 | [
"MIT"
] | 20 | 2021-04-03T10:06:51.000Z | 2021-08-20T22:46:23.000Z | is_even/_typings.py | spookyahell/PyIsEven | da0fe5a181c77ef84e858a2da5c04f66b7916591 | [
"MIT"
] | 11 | 2021-04-02T17:25:46.000Z | 2021-12-14T03:28:30.000Z | from typing import TypedDict
class Success(TypedDict):
ad: str
iseven: bool
class Error(TypedDict):
error: str
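# Illustrative values (added for documentation): the payload shapes these
# TypedDicts describe; the literal strings are assumptions, not real API output.
EXAMPLE_SUCCESS: Success = {"ad": "example ad text", "iseven": True}
EXAMPLE_ERROR: Error = {"error": "invalid number"}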
| 11.545455 | 28 | 0.692913 |
4a26eec7c95d374f67c63256a1e01a11e9ff3a2f | 2,100 | py | Python | web/addons/account/project/wizard/account_analytic_chart.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/account/project/wizard/account_analytic_chart.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/addons/account/project/wizard/account_analytic_chart.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_chart(osv.osv_memory):
_name = 'account.analytic.chart'
_description = 'Account Analytic Chart'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
}
def analytic_account_chart_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result_context = {}
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_analytic_account_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
data = self.read(cr, uid, ids, [])[0]
if data['from_date']:
result_context.update({'from_date': data['from_date']})
if data['to_date']:
result_context.update({'to_date': data['to_date']})
result['context'] = str(result_context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 42 | 106 | 0.61619 |
4a26eedc512b2a1f7c479fbad185fc88a1544978 | 21,050 | py | Python | analysis_module.py | huangbrandon432/Investing-Trading-Tool | 370015b906b7ee90c0fb48ca69865ac7428b3917 | [
"BSD-3-Clause"
] | null | null | null | analysis_module.py | huangbrandon432/Investing-Trading-Tool | 370015b906b7ee90c0fb48ca69865ac7428b3917 | [
"BSD-3-Clause"
] | null | null | null | analysis_module.py | huangbrandon432/Investing-Trading-Tool | 370015b906b7ee90c0fb48ca69865ac7428b3917 | [
"BSD-3-Clause"
] | null | null | null |
import robin_stocks as r
import pandas as pd
import numpy as np
from datetime import date, timedelta
import yfinance as yf
from collections import deque
###Stocks/Crypto#########################################################################################################################################
class StocksCrypto:
def __init__(self, orders, crypto = 'no'):
self.orders = orders
self.crypto = crypto
def examine_trades(self):
self.total_gain = 0
self.total_loss = 0
self.trades = []
trading_dict = {}
net_gain_loss = 0
for i in range(len(self.orders)):
side = self.orders.loc[i, 'side']
symbol = self.orders.loc[i, 'symbol']
date = self.orders.loc[i, 'date'].strftime('%Y-%m-%d')
quantity = self.orders.loc[i, 'quantity']
avg_price = self.orders.loc[i, 'average_price']
total = round(self.orders.loc[i, 'total'],2)
if side == 'buy':
if symbol+'_avgprice' in trading_dict:
cur_total = trading_dict[symbol+'_quantity']*trading_dict[symbol+'_avgprice']
new_total = cur_total + quantity * avg_price
trading_dict[symbol+'_quantity'] += quantity
trading_dict[symbol+'_avgprice'] = new_total/trading_dict[symbol+'_quantity']
else:
trading_dict[symbol+'_avgprice'] = avg_price
trading_dict[symbol+'_quantity'] = quantity
cur_avg_price = round(trading_dict[symbol+'_avgprice'],2)
cur_quantity = round(trading_dict[symbol+'_quantity'],2)
self.trades.append([side, symbol, date, round(quantity, 2), round(avg_price, 2), cur_quantity, cur_avg_price, total, 0, str(0) + '%', net_gain_loss, ''])
#if sell
if side == 'sell':
if symbol+'_avgprice' in trading_dict:
gain = round((avg_price - trading_dict[symbol+'_avgprice']) * quantity,2)
perc_gain = round((avg_price - trading_dict[symbol+'_avgprice'])/trading_dict[symbol+'_avgprice']*100,2)
if gain >= 0:
self.total_gain += gain
else:
self.total_loss += gain
trading_dict[symbol+'_quantity'] -= quantity
net_gain_loss = round(self.total_gain + self.total_loss,2)
cur_avg_price = round(trading_dict[symbol+'_avgprice'],2)
cur_quantity = round(trading_dict[symbol+'_quantity'],2)
self.trades.append([side, symbol, date, round(quantity, 2), round(avg_price, 2), cur_quantity, cur_avg_price, total, gain, str(perc_gain) + '%', net_gain_loss, ''])
#if holding = 0, pop symbol avgprice and quantity
if trading_dict[symbol+'_quantity'] == 0:
trading_dict.pop(symbol+'_avgprice')
trading_dict.pop(symbol+'_quantity')
else:
gain = round(avg_price * quantity,2)
self.total_gain += gain
net_gain_loss = round(self.total_gain + self.total_loss,2)
self.trades.append([side, symbol, date, round(quantity, 2), round(avg_price, 2), None, None, total, gain, str(0) + '%', net_gain_loss, 'Yes'])
self.trades_df = pd.DataFrame(self.trades, columns = ['Side', 'Symbol', 'Date', 'Quantity', 'Avg_Price', 'Cur Quantity', 'Cur_Avg_Cost', 'Total', 'Gain', '% Gain', 'Net Gain/Loss', 'Free/Acquired Stock'])
self.gains_df = self.trades_df[(self.trades_df['Gain'] >= 0) & (self.trades_df['Side'] == 'sell')].sort_values('Gain', ascending = False).reset_index(drop=True)
self.losses_df = self.trades_df[(self.trades_df['Gain'] < 0) & (self.trades_df['Side'] == 'sell')].sort_values('Gain').reset_index(drop=True)
def add_price_diff(self):
if self.crypto == 'no':
stocks_sold = list(set(self.trades_df[self.trades_df['Side'] == 'sell']['Symbol']))
ticker_cur_price = []
for i in stocks_sold:
try:
ticker = yf.Ticker(i)
close = round(ticker.history(period = "1d").reset_index(drop=True).loc[0, 'Close'],2)
ticker_cur_price.append((i, close, 'sell'))
except:
pass
ticker_cur_price = pd.DataFrame(ticker_cur_price, columns =['Symbol', 'Current Price', 'Side'])
self.trades_df_with_price_diff = self.trades_df.merge(ticker_cur_price, how = 'left', on = ['Symbol', 'Side'])
for i in range(len(self.trades_df_with_price_diff)):
transac_date = pd.to_datetime(self.trades_df_with_price_diff.loc[i, 'Date'])
symbol = self.trades_df_with_price_diff.loc[i, 'Symbol']
if symbol == 'SOXL' and transac_date < pd.to_datetime('2021-03-02'):
self.trades_df_with_price_diff.loc[i, 'Current Price'] *= 15
if symbol == 'TECL' and transac_date < pd.to_datetime('2021-03-02'):
self.trades_df_with_price_diff.loc[i, 'Current Price'] *= 10
if symbol == 'AAPL' and transac_date < pd.to_datetime('2020-08-28'):
self.trades_df_with_price_diff.loc[i, 'Current Price'] *= 4
if symbol == 'TSLA' and transac_date < pd.to_datetime('2020-08-31'):
self.trades_df_with_price_diff.loc[i, 'Current Price'] *= 5
else:
crypto_sold = list(set(self.trades_df[self.trades_df['Side'] == 'sell']['Symbol']))
crypto_cur_price = []
for i in crypto_sold:
try:
crypto_market_price = float(r.crypto.get_crypto_quote(i, info='mark_price'))
crypto_cur_price.append((i, crypto_market_price, 'sell'))
except:
pass
crypto_cur_price = pd.DataFrame(crypto_cur_price, columns =['Symbol', 'Current Price', 'Side'])
self.trades_df_with_price_diff = self.trades_df.merge(crypto_cur_price, how = 'left', on = ['Symbol', 'Side'])
self.trades_df_with_price_diff['Price Sold & Curr Price % Diff'] = round((self.trades_df_with_price_diff['Current Price'] - self.trades_df_with_price_diff["Avg_Price"])/self.trades_df_with_price_diff["Avg_Price"] * 100, 2)
self.trades_df_with_price_diff['Avg Cost & Curr Price % Diff'] = round((self.trades_df_with_price_diff['Current Price'] - self.trades_df_with_price_diff["Cur_Avg_Cost"])/self.trades_df_with_price_diff["Cur_Avg_Cost"] * 100, 2)
self.trades_df_with_price_diff['Current Price'].fillna('', inplace=True)
self.trades_df_with_price_diff['Price Sold & Curr Price % Diff'].fillna('', inplace=True)
self.trades_df_with_price_diff['Avg Cost & Curr Price % Diff'].fillna('', inplace=True)
self.trades_df_with_price_diff['Price Sold & Curr Price % Diff'] = self.trades_df_with_price_diff['Price Sold & Curr Price % Diff'].apply(lambda x: str(x) + '%' if x != '' else '')
self.trades_df_with_price_diff['Avg Cost & Curr Price % Diff'] = self.trades_df_with_price_diff['Avg Cost & Curr Price % Diff'].apply(lambda x: str(x) + '%' if x != '' else '')
self.gains_df_with_price_diff = self.trades_df_with_price_diff[(self.trades_df_with_price_diff['Gain'] >= 0) & (self.trades_df_with_price_diff['Side'] == 'sell')].sort_values('Gain', ascending = False).reset_index(drop=True)
self.losses_df_with_price_diff = self.trades_df_with_price_diff[(self.trades_df_with_price_diff['Gain'] < 0) & (self.trades_df_with_price_diff['Side'] == 'sell')].sort_values('Gain').reset_index(drop=True)
def add_hold_time(self):
self.trades_df_with_price_diff['Days Held'] = None
symbols = {}
for i in range(len(self.trades_df_with_price_diff)):
side = self.trades_df_with_price_diff.loc[i, 'Side']
symbol = self.trades_df_with_price_diff.loc[i, 'Symbol']
date = self.trades_df_with_price_diff.loc[i, 'Date']
quantity = self.trades_df_with_price_diff.loc[i, 'Quantity']
if side == 'buy':
if symbol not in symbols:
symbols[symbol] = deque([])
symbols[symbol].append([date, quantity])
if side == 'sell':
if symbol in symbols and len(symbols[symbol]) > 0:
first_in_queue_quantity = symbols[symbol][0][1]
symbols[symbol][0][1] -= quantity
hold_time = (pd.to_datetime(date) - pd.to_datetime(symbols[symbol][0][0])).days
if symbols[symbol][0][1] == 0:
self.trades_df_with_price_diff.loc[i, 'Days Held'] = hold_time
symbols[symbol].popleft()
elif symbols[symbol][0][1] > 0:
self.trades_df_with_price_diff.loc[i, 'Days Held'] = hold_time
else:
first_in_queue_weight_times_holdtime = 0
while symbols[symbol][0][1] < 0:
#neg value
quantity_excess = symbols[symbol][0][1]
hold_time = (pd.to_datetime(date) - pd.to_datetime(symbols[symbol][0][0])).days
first_in_que_weight = first_in_queue_quantity/quantity
first_in_queue_weight_times_holdtime += first_in_que_weight * hold_time
symbols[symbol].popleft()
first_in_queue_quantity = symbols[symbol][0][1]
symbols[symbol][0][1] += quantity_excess
hold_time = (pd.to_datetime(date) - pd.to_datetime(symbols[symbol][0][0])).days
if symbols[symbol][0][1] == 0:
self.trades_df_with_price_diff.loc[i, 'Days Held'] = round(first_in_queue_weight_times_holdtime + first_in_queue_quantity/quantity * hold_time,2)
symbols[symbol].popleft()
elif symbols[symbol][0][1] > 0:
self.trades_df_with_price_diff.loc[i, 'Days Held'] = round(first_in_queue_weight_times_holdtime + (first_in_queue_quantity - symbols[symbol][0][1])/quantity * hold_time,2)
###Options#########################################################################################################################################
class Options:
def __init__(self, option_orders):
self.option_orders = option_orders
def examine_trades(self):
self.total_optionsgain = 0
self.total_optionsloss = 0
self.trades = []
long_trading_dict = {}
long_symbol_strike_exp_type_list = []
short_trading_dict = {}
short_symbol_strike_exp_type_list = []
net_gain_loss = 0
for i in range(len(self.option_orders)):
side = self.option_orders.loc[i, 'side']
symbol = self.option_orders.loc[i, 'chain_symbol']
exp = self.option_orders.loc[i, 'expiration_date']
strike = self.option_orders.loc[i, 'strike_price']
option_type = self.option_orders.loc[i, 'option_type']
order_date = self.option_orders.loc[i, 'order_created_at'].strftime('%Y-%m-%d')
quantity = self.option_orders.loc[i, 'processed_quantity']
opening_strategy = self.option_orders.loc[i, 'opening_strategy']
closing_strategy = self.option_orders.loc[i, 'closing_strategy']
avg_price = self.option_orders.loc[i, 'price']
total = round(avg_price * quantity * 100,2)
symb_exp_strike_type = f'{symbol} {exp} {strike} {option_type}'
if side == 'buy' and opening_strategy in ['long_call', 'long_put']:
if symb_exp_strike_type not in long_symbol_strike_exp_type_list:
long_symbol_strike_exp_type_list.append(symb_exp_strike_type)
if symb_exp_strike_type+'_avgprice' in long_trading_dict:
cur_total = long_trading_dict[symb_exp_strike_type+'_quantity']*long_trading_dict[symb_exp_strike_type+'_avgprice']
new_total = cur_total + quantity * avg_price
long_trading_dict[symb_exp_strike_type+'_quantity'] += quantity
long_trading_dict[symb_exp_strike_type+'_avgprice'] = new_total/long_trading_dict[symb_exp_strike_type+'_quantity']
else:
long_trading_dict[symb_exp_strike_type+'_avgprice'] = avg_price
long_trading_dict[symb_exp_strike_type+'_quantity'] = quantity
cur_long_avg_price = round(long_trading_dict[symb_exp_strike_type+'_avgprice'],2)
cur_long_quantity = round(long_trading_dict[symb_exp_strike_type+'_quantity'],2)
self.trades.append([side, symbol, option_type, opening_strategy, exp, strike, order_date, quantity, avg_price, cur_long_avg_price, cur_long_quantity, total, 0, str(0), '', net_gain_loss])
elif side == 'sell' and closing_strategy in ['long_call', 'long_put']:
if symb_exp_strike_type+'_avgprice' in long_trading_dict:
cur_long_avg_price = round(long_trading_dict[symb_exp_strike_type+'_avgprice'],2)
gain = round((avg_price - cur_long_avg_price) * quantity*100,2)
perc_gain = round((avg_price - cur_long_avg_price)/cur_long_avg_price*100,2)
if gain >= 0:
self.total_optionsgain += gain
else:
self.total_optionsloss += gain
long_trading_dict[symb_exp_strike_type+'_quantity'] -= quantity
net_gain_loss = round(self.total_optionsgain + self.total_optionsloss, 2)
cur_long_quantity = round(long_trading_dict[symb_exp_strike_type+'_quantity'], 2)
self.trades.append([side, symbol, option_type, closing_strategy, exp, strike, order_date, quantity, avg_price, cur_long_avg_price, cur_long_quantity, total, gain, str(perc_gain) + '%', '', net_gain_loss])
#if holding = 0, pop chain_symbol avgprice and quantity
if long_trading_dict[symb_exp_strike_type+'_quantity'] == 0:
long_trading_dict.pop(symb_exp_strike_type+'_avgprice')
long_trading_dict.pop(symb_exp_strike_type+'_quantity')
long_symbol_strike_exp_type_list.remove(symb_exp_strike_type)
elif side == 'sell' and opening_strategy in ['short_call', 'short_put']:
if symb_exp_strike_type not in short_symbol_strike_exp_type_list:
short_symbol_strike_exp_type_list.append(symb_exp_strike_type)
if symb_exp_strike_type+'_avgprice' in short_trading_dict:
cur_total = short_trading_dict[symb_exp_strike_type+'_quantity']*short_trading_dict[symb_exp_strike_type+'_avgprice']
new_total = cur_total + quantity * avg_price
short_trading_dict[symb_exp_strike_type+'_quantity'] += quantity
short_trading_dict[symb_exp_strike_type+'_avgprice'] = new_total/short_trading_dict[symb_exp_strike_type+'_quantity']
#else add chain_symbol_avgprice = buy price in df and chain_symbol_quantity = bought quantity
else:
short_trading_dict[symb_exp_strike_type+'_avgprice'] = avg_price
short_trading_dict[symb_exp_strike_type+'_quantity'] = quantity
cur_short_avg_price = round(short_trading_dict[symb_exp_strike_type+'_avgprice'], 2)
cur_short_quantity = round(short_trading_dict[symb_exp_strike_type+'_quantity'], 2)
self.trades.append([side, symbol, option_type, opening_strategy, exp, strike, order_date, quantity, avg_price, cur_short_avg_price, cur_short_quantity, total, 0, str(0), '', net_gain_loss])
elif side == 'buy' and closing_strategy in ['short_call', 'short_put']:
if symb_exp_strike_type+'_avgprice' in short_trading_dict:
cur_short_avg_price = round(short_trading_dict[symb_exp_strike_type+'_avgprice'],2)
gain = round((cur_short_avg_price - avg_price) * quantity * 100, 2)
perc_gain = round( (cur_short_avg_price - avg_price) / cur_short_avg_price * 100, 2)
if gain >= 0:
self.total_optionsgain += gain
else:
self.total_optionsloss += gain
short_trading_dict[symb_exp_strike_type+'_quantity'] -= quantity
net_gain_loss = round(self.total_optionsgain + self.total_optionsloss, 2)
cur_short_quantity = round(short_trading_dict[symb_exp_strike_type+'_quantity'], 2)
self.trades.append([side, symbol, option_type, closing_strategy, exp, strike, order_date, quantity, avg_price, cur_short_avg_price, cur_short_quantity, total, gain, str(perc_gain) + '%', '', net_gain_loss])
#if holding position = 0, then pop chain_symbol avgprice and chain_symbol quantity
if short_trading_dict[symb_exp_strike_type+'_quantity'] == 0:
short_trading_dict.pop(symb_exp_strike_type+'_avgprice')
short_trading_dict.pop(symb_exp_strike_type+'_quantity')
short_symbol_strike_exp_type_list.remove(symb_exp_strike_type)
#expired orders
for i in range(len(self.option_orders)):
symbol = self.option_orders.loc[i, 'chain_symbol']
exp = self.option_orders.loc[i, 'expiration_date']
strike = self.option_orders.loc[i, 'strike_price']
option_type = self.option_orders.loc[i, 'option_type']
quantity = self.option_orders.loc[i, 'processed_quantity']
order_date = self.option_orders.loc[i, 'order_created_at'].strftime('%Y-%m-%d')
opening_strategy = self.option_orders.loc[i, 'opening_strategy']
closing_strategy = self.option_orders.loc[i, 'closing_strategy']
avg_price = self.option_orders.loc[i, 'price']
symb_exp_strike_type = f'{symbol} {exp} {strike} {option_type}'
if symb_exp_strike_type in long_symbol_strike_exp_type_list and opening_strategy in ['long_call', 'long_put'] and exp < date.today():
total = long_trading_dict[symb_exp_strike_type+'_avgprice'] * long_trading_dict[symb_exp_strike_type+'_quantity'] * 100
long_trading_dict.pop(symb_exp_strike_type+'_avgprice')
long_trading_dict.pop(symb_exp_strike_type+'_quantity')
long_symbol_strike_exp_type_list.remove(symb_exp_strike_type)
self.total_optionsloss -= total
net_gain_loss = round(self.total_optionsgain + self.total_optionsloss, 2)
self.trades.append([side, symbol, option_type, opening_strategy, exp, strike, order_date, quantity, avg_price, 0, 0, total, -total, '-100%', 'Yes', net_gain_loss])
if symb_exp_strike_type in short_symbol_strike_exp_type_list and opening_strategy in ['short_call', 'short_put'] and exp < date.today():
total = short_trading_dict[symb_exp_strike_type+'_avgprice'] * short_trading_dict[symb_exp_strike_type+'_quantity'] * 100
short_trading_dict.pop(symb_exp_strike_type+'_avgprice')
short_trading_dict.pop(symb_exp_strike_type+'_quantity')
short_symbol_strike_exp_type_list.remove(symb_exp_strike_type)
self.total_optionsgain += total
net_gain_loss = round(self.total_optionsgain + self.total_optionsloss, 2)
self.trades.append([side, symbol, option_type, opening_strategy, exp, strike, order_date, quantity, avg_price, 0, 0, total, total, '', 'Yes', net_gain_loss])
self.trades_df = pd.DataFrame(self.trades, columns = ['Side', 'Symbol', 'Option Type', 'Strategy', 'Expiration', 'Strike', 'Date', 'Quantity', 'Avg_Price', 'Cur_Avg_Cost', 'Cur Quantity', 'Total', 'Gain', '% Gain', 'Expired', 'Net Gain/Loss'])
self.trades_df['Expiration'] = self.trades_df['Expiration'].astype(str).str.replace(' 00:00:00', '')
self.trades_df['Gain'] = self.trades_df['Gain'].astype('float64')
self.gains_df = self.trades_df[(self.trades_df['Gain'] > 0)].sort_values('Gain', ascending = False).reset_index(drop=True)
self.losses_df = self.trades_df[(self.trades_df['Gain'] < 0)].sort_values('Gain').reset_index(drop=True)
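        # Illustrative note (not part of the original source): after this method runs,
        # a caller holding an instance of the enclosing class (named `tracker` here as
        # an assumption) could inspect the summary frames roughly like this:
        #   tracker.trades_df[['Symbol', 'Gain', 'Net Gain/Loss']].tail()
        #   tracker.gains_df.head()    # largest realized gains
        #   tracker.losses_df.head()   # largest realized losses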
| 45.960699 | 251 | 0.606936 |
4a26eede28f40dbcdf207bf7b86c723f153cb695 | 10,816 | py | Python | opsgenie_sdk/configuration.py | BATS/opsgenie-python-sdk | b59b3881187288c4cff401f05e4a84ca380425ab | [
"Apache-2.0"
] | null | null | null | opsgenie_sdk/configuration.py | BATS/opsgenie-python-sdk | b59b3881187288c4cff401f05e4a84ca380425ab | [
"Apache-2.0"
] | null | null | null | opsgenie_sdk/configuration.py | BATS/opsgenie-python-sdk | b59b3881187288c4cff401f05e4a84ca380425ab | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
def __init__(cls, name, bases, dct):
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
cls._default = copy.copy(default)
class Configuration(six.with_metaclass(TypeWithDefault, object)):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self):
"""Constructor"""
# Default Base url
self.host = "https://api.opsgenie.com"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
self.api_key_prefix['Authorization'] = 'GenieKey'
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("opsgenie_sdk")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# Retry policy settings
# Retry count
self.retry_count = 5
# Delay time between attempts
self.retry_delay = 30
# Maximum amount of delay
self.retry_max_delay = 60
# Multiplier applied to delay between attempts
self.back_off = 1
# Http status codes on which to perform retry
self.retry_http_response = ['429', '500', '502-599']
# To enable/disable retry feature
self.retry_enabled = True
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Proxy headers
self.proxy_headers = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
self.metrics_transaction_id = None
self.short_polling_max_retries = 10
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if (self.api_key.get(identifier) and
self.api_key_prefix.get(identifier)):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501
elif self.api_key.get(identifier):
return self.api_key[identifier]
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'GenieKey':
{
'type': 'api_key',
'in': 'header',
'key': 'Authorization',
'value': self.get_api_key_with_prefix('Authorization')
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.0.0\n"\
"SDK Package Version: 2.0.3".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "https://api.opsgenie.com/",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables={}):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
servers = self.get_host_settings()
# check array index out of bound
if index < 0 or index >= len(servers):
raise ValueError(
"Invalid index {} when selecting the host settings. Must be less than {}" # noqa: E501
.format(index, len(servers)))
server = servers[index]
url = server['url']
# go through variable and assign a value
        # servers without templated variables may omit the 'variables' key
        for variable_name in server.get('variables', {}):
if variable_name in variables:
if variables[variable_name] in server['variables'][
variable_name]['enum_values']:
url = url.replace("{" + variable_name + "}",
variables[variable_name])
else:
raise ValueError(
"The variable `{}` in the host URL has invalid value {}. Must be {}." # noqa: E501
.format(
variable_name, variables[variable_name],
server['variables'][variable_name]['enum_values']))
else:
# use default value
url = url.replace(
"{" + variable_name + "}",
server['variables'][variable_name]['default_value'])
return url
# If retry policy passed as config object
def set_retry_policy(self, custom={}):
if "retry_count" in custom.keys():
self.retry_count = custom["retry_count"]
if "retry_delay" in custom.keys():
self.retry_delay = custom["retry_delay"]
if "back_off" in custom.keys():
self.back_off = custom["back_off"]
if "retry_http_response" in custom.keys():
self.retry_http_response = custom["retry_http_response"]
if "retry_max_delay" in custom.keys():
self.retry_max_delay = custom["retry_max_delay"]
if "retry_enabled" in custom.keys():
self.retry_enabled = custom["retry_enabled"]
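# Illustrative usage sketch (not part of the generated module): configuring the API
# key, retry policy, and debug flag exposed by the Configuration class above. The
# API key string is a placeholder.
if __name__ == "__main__":
    conf = Configuration()
    conf.api_key['Authorization'] = 'YOUR-OPSGENIE-API-KEY'  # placeholder value
    conf.debug = False
    conf.set_retry_policy({
        "retry_count": 3,
        "retry_delay": 10,
        "retry_http_response": ['429', '500'],
    })
    print(conf.to_debug_report())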
| 32.87538 | 107 | 0.592825 |
4a26eee8278a2e29877eb1fa8a8c546ca6b6bd68 | 7,269 | py | Python | test/test_motor_pool.py | myfreecomm/motor | bf1382c906045d9d9ad14106486a02f6b8721ada | [
"Apache-2.0"
] | null | null | null | test/test_motor_pool.py | myfreecomm/motor | bf1382c906045d9d9ad14106486a02f6b8721ada | [
"Apache-2.0"
] | null | null | null | test/test_motor_pool.py | myfreecomm/motor | bf1382c906045d9d9ad14106486a02f6b8721ada | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
import functools
import greenlet
import random
import unittest
import pymongo.errors
from tornado import stack_context
from tornado.concurrent import Future
from tornado.testing import gen_test
import test
from test import MotorTest, assert_raises, SkipTest
from test.utils import delay
class MotorPoolTest(MotorTest):
@gen_test
def test_max_size_default(self):
yield self.cx.open()
pool = self.cx._get_primary_pool()
# Current defaults
self.assertEqual(100, pool.max_size)
self.assertEqual(None, pool.wait_queue_timeout)
self.assertEqual(None, pool.wait_queue_multiple)
@gen_test(timeout=30)
def test_max_size(self):
if not test.env.v8:
raise SkipTest("Need multithreaded Javascript in mongod for test")
max_pool_size = 5
cx = self.motor_client(max_pool_size=max_pool_size)
# Lazy connection.
self.assertEqual(None, cx._get_primary_pool())
yield cx.motor_test.test_collection.remove()
pool = cx._get_primary_pool()
self.assertEqual(max_pool_size, pool.max_size)
self.assertEqual(1, len(pool.sockets))
self.assertEqual(1, pool.motor_sock_counter)
# Grow to max_pool_size.
ops_completed = Future()
nops = 100
results = []
def callback(i, result, error):
self.assertFalse(error)
results.append(i)
if len(results) == nops:
ops_completed.set_result(None)
collection = cx.motor_test.test_collection
yield collection.insert({}) # Need a document.
for i in range(nops):
# Introduce random delay, avg 5ms, just to make sure we're async.
collection.find_one(
{'$where': delay(random.random() / 10)},
callback=functools.partial(callback, i))
yield ops_completed
# All ops completed, but not in order.
self.assertEqual(list(range(nops)), sorted(results))
self.assertNotEqual(list(range(nops)), results)
self.assertEqual(max_pool_size, len(pool.sockets))
self.assertEqual(max_pool_size, pool.motor_sock_counter)
cx.close()
@gen_test(timeout=30)
def test_wait_queue_timeout(self):
# Do a find_one that takes 1 second, and set waitQueueTimeoutMS to 500,
# 5000, and None. Verify timeout iff max_wait_time < 1 sec.
where_delay = 1
yield self.collection.insert({})
for waitQueueTimeoutMS in (500, 5000, None):
cx = self.motor_client(
max_pool_size=1, waitQueueTimeoutMS=waitQueueTimeoutMS)
yield cx.open()
pool = cx._get_primary_pool()
if waitQueueTimeoutMS:
self.assertEqual(
waitQueueTimeoutMS, pool.wait_queue_timeout * 1000)
else:
self.assertTrue(pool.wait_queue_timeout is None)
collection = cx.motor_test.test_collection
future = collection.find_one({'$where': delay(where_delay)})
if waitQueueTimeoutMS and waitQueueTimeoutMS < where_delay * 1000:
with assert_raises(pymongo.errors.ConnectionFailure):
yield collection.find_one()
else:
# No error
yield collection.find_one()
yield future
cx.close()
@gen_test
def test_connections_unacknowledged_writes(self):
# Verifying that unacknowledged writes don't open extra connections
collection = self.cx.motor_test.test_collection
yield collection.drop()
pool = self.cx._get_primary_pool()
self.assertEqual(1, pool.motor_sock_counter)
nops = 10
for i in range(nops - 1):
collection.insert({'_id': i}, w=0)
# We have only one socket open, and it's already back in the pool
self.assertEqual(1, pool.motor_sock_counter)
self.assertEqual(1, len(pool.sockets))
# Acknowledged write; uses same socket and blocks for all inserts
yield collection.insert({'_id': nops - 1})
self.assertEqual(1, pool.motor_sock_counter)
# Socket is back in the idle pool
self.assertEqual(1, len(pool.sockets))
# All ops completed
docs = yield collection.find().sort('_id').to_list(length=100)
self.assertEqual(list(range(nops)), [doc['_id'] for doc in docs])
@gen_test
def test_stack_context(self):
# See http://tornadoweb.org/en/stable/stack_context.html
# MotorPool.get_socket can block waiting for a callback in another
# context to return a socket. We verify MotorPool's stack-context
# handling by testing that exceptions raised in get_socket's
# continuation are caught in get_socket's stack context, not
# return_socket's.
loop = self.io_loop
history = []
cx = self.motor_client(max_pool_size=1)
# Open a socket
yield cx.motor_test.test_collection.find_one()
pool = cx._get_primary_pool()
self.assertEqual(1, len(pool.sockets))
sock_info = pool.get_socket()
main_gr = greenlet.getcurrent()
def catch_get_sock_exc(exc_type, exc_value, exc_traceback):
history.extend(['get_sock_exc', exc_value])
return True # Don't propagate
def catch_return_sock_exc(exc_type, exc_value, exc_traceback):
history.extend(['return_sock_exc', exc_value])
return True # Don't propagate
def get_socket():
# Blocks until socket is available, since max_pool_size is 1.
pool.get_socket()
loop.add_callback(raise_callback)
my_assert = AssertionError('foo')
def raise_callback():
history.append('raise')
raise my_assert
def return_socket():
with stack_context.ExceptionStackContext(catch_return_sock_exc):
pool.maybe_return_socket(sock_info)
main_gr.switch()
with stack_context.ExceptionStackContext(catch_get_sock_exc):
loop.add_callback(greenlet.greenlet(get_socket).switch)
greenlet.greenlet(return_socket).switch()
yield self.pause(0.1)
# 'return_sock_exc' was *not* added to history, because stack context
# wasn't leaked from return_socket to get_socket.
self.assertEqual(['raise', 'get_sock_exc', my_assert], history)
cx.close()
if __name__ == '__main__':
unittest.main()
| 34.947115 | 79 | 0.649745 |
4a26f0816b209c1d559188e8cdc9467006e51228 | 118 | py | Python | week1/yaml_test1.py | nlinkov/test_a | 091dc66134f1e77ede35f58a774593862a44ded2 | [
"MIT"
] | null | null | null | week1/yaml_test1.py | nlinkov/test_a | 091dc66134f1e77ede35f58a774593862a44ded2 | [
"MIT"
] | null | null | null | week1/yaml_test1.py | nlinkov/test_a | 091dc66134f1e77ede35f58a774593862a44ded2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from pprint import pprint as pp
from yaml_helper import read_yaml
pp(read_yaml('test_file3.yml'))
| 16.857143 | 33 | 0.779661 |
4a26f0a264024ee1aa72a1a6bb0e8bf6ea932197 | 127 | py | Python | predict/urls.py | yen936/adaptic_public | 905e287843c152d8a743a2a64ceac539aac96149 | [
"MIT"
] | 3 | 2019-05-18T14:26:18.000Z | 2020-04-25T16:15:24.000Z | predict/urls.py | yen936/adaptic_public | 905e287843c152d8a743a2a64ceac539aac96149 | [
"MIT"
] | 2 | 2020-02-12T00:17:32.000Z | 2020-06-05T20:53:28.000Z | predict/urls.py | yen936/adaptic_public | 905e287843c152d8a743a2a64ceac539aac96149 | [
"MIT"
] | null | null | null | from django.urls import path
from predict.views import Predictor
urlpatterns = [
path('predict/', Predictor.as_view()),
]
| 18.142857 | 42 | 0.732283 |
4a26f2cf117c6b090f145a881a079198f16fb028 | 2,968 | py | Python | openff/benchmark/analysis/readwrite.py | openforcefield/openbenchmark | 65a3519f18cb587622471a6bda6d58d9e39cd1d2 | [
"MIT"
] | 3 | 2020-03-14T22:52:18.000Z | 2020-05-21T10:28:44.000Z | openff/benchmark/analysis/readwrite.py | openforcefield/openbenchmark | 65a3519f18cb587622471a6bda6d58d9e39cd1d2 | [
"MIT"
] | 5 | 2020-03-24T19:35:38.000Z | 2020-04-30T22:37:29.000Z | openff/benchmark/analysis/readwrite.py | openforcefield/openbenchmark | 65a3519f18cb587622471a6bda6d58d9e39cd1d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
io.py
I/O operations for the analysis/report part of the openff-benchmark workflow
By: David F. Hahn
Version: Nov 18 2020
"""
import os
import numpy as np
import pandas as pd
import pint
ureg = pint.UnitRegistry()
from openforcefield.topology import Molecule
from openforcefield.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY, OpenEyeToolkitWrapper
oetk_loaded = False
for tkw in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits:
if isinstance(tkw, OpenEyeToolkitWrapper):
oetk_loaded = True
if oetk_loaded:
GLOBAL_TOOLKIT_REGISTRY.deregister_toolkit(OpenEyeToolkitWrapper)
def read_sdfs(path):
mols = []
if (os.path.exists(path) and path.split('.')[-1] == 'sdf' ):
mols.append(Molecule.from_file(path, 'SDF', allow_undefined_stereo=True))
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
for file in files:
file_name = os.path.join(root, file)
if (os.path.exists(file_name) and file_name.split('.')[-1] == 'sdf' ):
mols.append(Molecule.from_file(file_name, 'SDF', allow_undefined_stereo=True))
return mols
def convert_to_quantity(dataframe, columns='final_energy', to='kilocalories / mole'):
if type(columns) is str:
columns=[columns]
for col in columns:
dataframe[col] = dataframe[col].apply(lambda val: ureg.Quantity(val).to(to).magnitude)
def mols_to_dataframe(mols):
moldata = []
for mol in mols:
moldata_i = {
'name': mol.name,
'mol': mol
}
for key, item in mol.properties.items():
moldata_i[key] = item
moldata.append(moldata_i)
df = pd.DataFrame(moldata)
df['molecule_index'] = df['molecule_index'].astype(int)
df['conformer_index'] = df['conformer_index'].astype(int)
    # add a unique molecule identifier in case there are two or more molecules with the same
# molecule index, but different group names
df['molecule_identifier'] = df.apply(lambda row: f"{row['group_name']}_{row['molecule_index']:05d}", axis=1)
convert_to_quantity(df, columns=['initial_energy', 'final_energy'])
df.set_index('name', drop=False, inplace=True)
return df
def write_results(dataframe, file_name, columns=['name', 'group_name', 'molecule_index', 'conformer_index', 'rmsd', 'tfd', 'dde']):
if 'molecule_index' in columns:
dataframe['molecule_index'] = dataframe['molecule_index'].apply(lambda x: f'{x:05d}')
if 'conformer_index' in columns:
dataframe['conformer_index'] = dataframe['conformer_index'].apply(lambda x: f'{x:02d}')
dataframe = dataframe.loc[:,columns]
if 'dde' in columns:
dataframe.rename(columns={'dde': 'dde[kcal/mol]'}, inplace=True)
dataframe.to_csv(file_name,
index=False if 'name' in columns else True,
float_format='%15.8e')
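# Illustrative usage sketch (not part of the original module): a minimal pass through
# the helpers above. The SDF directory and output file name are placeholders, and the
# selected columns are limited to fields that mols_to_dataframe itself provides.
if __name__ == '__main__':
    molecules = read_sdfs('path/to/sdf_directory')  # placeholder path
    frame = mols_to_dataframe(molecules)
    write_results(frame, 'results.csv',
                  columns=['name', 'group_name', 'molecule_index', 'conformer_index'])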
| 34.511628 | 131 | 0.65566 |
4a26f37b1e1618e41d525358192d1fed3f05ca23 | 473 | py | Python | delphi_epidata/__init__.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/__init__.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | null | null | null | delphi_epidata/__init__.py | lee14257/delphi-epidata-py | ca84147fb75a50b073bab43e77dcb32b52b26f4b | [
"MIT"
] | 1 | 2021-12-22T23:56:58.000Z | 2021-12-22T23:56:58.000Z | """Fetch data from Delphi's API.
"""
from ._constants import __version__
from ._model import (
EpiRange,
EpiRangeDict,
EpiDataResponse,
EpiRangeLike,
InvalidArgumentException,
EpiRangeParam,
IntParam,
StringParam,
EpiDataFormatType,
AEpiDataCall,
)
from ._covidcast import (
DataSignal,
DataSource,
WebLink,
DataSignalGeoStatistics,
CovidcastDataSources,
GeoType,
TimeType,
)
__author__ = "Delphi Group"
| 17.518519 | 35 | 0.693446 |
4a26f3db1020108c23c59d18f242e4fd22d7dc15 | 1,837 | py | Python | aio_graphite_api/cluster/cluster.py | yunstanford/aio-graphite-api | 819b82c451c23ce5928e711202bde79a5399c3da | [
"Apache-2.0"
] | 1 | 2016-12-09T04:14:16.000Z | 2016-12-09T04:14:16.000Z | aio_graphite_api/cluster/cluster.py | yunstanford/aio-graphite-api | 819b82c451c23ce5928e711202bde79a5399c3da | [
"Apache-2.0"
] | 1 | 2017-02-06T22:15:07.000Z | 2017-02-06T23:01:04.000Z | aio_graphite_api/cluster/cluster.py | yunstanford/aio-graphite-api | 819b82c451c23ce5928e711202bde79a5399c3da | [
"Apache-2.0"
] | null | null | null | import aiohttp
async def init_cluster(config):
"""
	a helper function for initializing a cluster that talks with remote
graphite-web.
"""
	return Cluster(config["cluster_servers"],
config["graphite_web_cluster_protocol"],
config["cluster_timeout"])
class Cluster:
"""
	a cluster client wrapping an aiohttp ClientSession, which can talk with remote
graphite-web.
"""
	def __init__(self, cluster, protocol="http", timeout=None):
		self.cluster = cluster
		self.protocol = protocol
		self.session = aiohttp.ClientSession()
		self.time_out = timeout
async def query(self, server, target, fromTime, untilTime):
"""
query one server.
"""
		url = self._query_url(server)
		params = self._query_params(target, fromTime, untilTime)
result = []
try:
			async with self.session.get(
				url,
				timeout=self.time_out,
				params=params) as resp:
result = await resp.json()
except asyncio.TimeoutError as err:
raise err
except Exception as e:
raise e
return result
async def query_all(self, target, fromTime, untilTime):
"""
		query all servers in the cluster.
		#TODO optimization: use the same consistent hashing as is used when writing metrics.
"""
results = []
for server in self.cluster:
try:
data = await self.query(server, target, fromTime, untilTime)
results.append(data)
except Exception as e:
raise e
		return self.merge_data(results)
def merge_data(self, data):
"""
Merge data from different servers.
"""
pass
def _query_url(self, server):
"""
generate url for query.
"""
return "{protocol}://{server}/render/".format(self.protocol, server)
def _query_params(self, target, fromTime, untilTime):
# Only support json for now.
		# time interval processing should happen before we get here.
# NoCache
return {
"target": target,
"from": fromTime,
"until": untilTime,
"noCache": 1,
}
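# Illustrative usage sketch (not part of the original module). The config keys match
# the ones read by init_cluster above; the host names, target, and time range are
# placeholder values. Note that merge_data is still a stub, so query_all currently
# returns None even though each server is queried.
async def _example_query():
	config = {
		"cluster_servers": ["graphite-web-1:8080", "graphite-web-2:8080"],  # placeholders
		"graphite_web_cluster_protocol": "http",
		"cluster_timeout": 10,
	}
	cluster = await init_cluster(config)
	return await cluster.query_all("carbon.agents.*.metricsReceived", "-1h", "now")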
| 21.360465 | 91 | 0.688078 |
4a26f4a594ff2847d4d76b85b3586ca2c01a07e2 | 4,450 | py | Python | sdk/keyvault/azure-keyvault-certificates/samples/backup_restore_operations_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | [
"MIT"
] | 1 | 2021-04-26T21:15:01.000Z | 2021-04-26T21:15:01.000Z | sdk/keyvault/azure-keyvault-certificates/samples/backup_restore_operations_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | [
"MIT"
] | 2 | 2021-08-24T15:32:30.000Z | 2021-08-24T23:21:34.000Z | sdk/keyvault/azure-keyvault-certificates/samples/backup_restore_operations_async.py | omziv/azure-sdk-for-python | 3fbb0f9e1f86acc4e0a8cb13b790dccf32e3860f | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.certificates.aio import CertificateClient
from azure.keyvault.certificates import CertificatePolicy
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-certificates and azure-identity packages (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(certificates) resource for Azure Key Vault
#
# 1. Create a certificate (create_certificate)
#
# 2. Backup a certificate (backup_certificate)
#
# 3. Delete a certificate (delete_certificate)
#
# 4. Purge a certificate (purge_deleted_certificate)
#
# 5. Restore a certificate (restore_certificate_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a certificate client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
vault_url = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = CertificateClient(vault_url=vault_url, credential=credential)
try:
print("\n.. Create Certificate")
cert_name = "BackupRestoreCertificate"
# Let's create a certificate for your key vault.
# if the certificate already exists in the Key Vault, then a new version of the certificate is created.
# Awaiting the call returns a KeyVaultCertificate if creation is successful, and a CertificateOperation if not.
certificate = await client.create_certificate(
certificate_name=cert_name, policy=CertificatePolicy.get_default()
)
print("Certificate with name '{0}' created.".format(certificate.name))
# Backups are good to have, if in case certificates gets deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing certificate")
certificate_backup = await client.backup_certificate(cert_name)
print("Backup created for certificate with name '{0}'.".format(cert_name))
# The storage account certificate is no longer in use, so you can delete it.
print("\n.. Delete the certificate")
await client.delete_certificate(cert_name)
print("Deleted certificate with name '{0}'".format(cert_name))
# Purge the deleted certificate.
# The purge will take some time, so wait before restoring the backup to avoid a conflict.
print("\n.. Purge the certificate")
await client.purge_deleted_certificate(cert_name)
await asyncio.sleep(60)
print("Purged certificate with name '{0}'".format(cert_name))
# In the future, if the certificate is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the certificate using the backed up certificate bytes")
certificate = await client.restore_certificate_backup(certificate_backup)
print("Restored certificate with name '{0}'".format(certificate.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| 45.408163 | 123 | 0.656854 |
4a26f5493094b13ab10540aebfefc50c1414ac15 | 2,960 | py | Python | load_tests/create_bus_subscription.py | mbta/alerts_concierge | d8e643445ef06f80ca273f2914c6959daea146f6 | [
"MIT"
] | null | null | null | load_tests/create_bus_subscription.py | mbta/alerts_concierge | d8e643445ef06f80ca273f2914c6959daea146f6 | [
"MIT"
] | 21 | 2021-03-12T17:05:30.000Z | 2022-02-16T21:48:35.000Z | load_tests/create_bus_subscription.py | mbta/alerts_concierge | d8e643445ef06f80ca273f2914c6959daea146f6 | [
"MIT"
] | 1 | 2021-12-09T15:09:53.000Z | 2021-12-09T15:09:53.000Z | from locust import TaskSequence, seq_task
from page import Page
class CreateBusSubscription(TaskSequence):
"""Sequence to create a bus subscription"""
@seq_task(1)
def visit_new_subscription_page(self):
response = self.client.get("/trip/new")
self.csrf_token = Page.get_csrf_token(response)
@seq_task(2)
def select_route(self):
response = self.client.post(
"/trip/leg",
json={
"mode_toggle": "bus",
"trip": {
"alternate_routes": "%7B%221%20-%201%22:%5B%22701%20-%201~~Route%20CT1~~bus%22%5D%7D",
"from_new_trip": "true",
"round_trip": "true",
"route": "1 - 1~~Route 1~~bus"
},
"_utf8": "✓",
"_csrf_token": self.csrf_token
}
)
self.csrf_token = Page.get_csrf_token(response)
@seq_task(3)
def select_direction(self):
response = self.client.post(
"/trip/leg",
json={
"mode_toggle": "subway",
"trip": {
"alternate_routes": "%7B%221%20-%200%22:%5B%22701%20-%200~~Route%20CT1~~bus%22%5D%7D",
"destination": None,
"direction": "0",
"new_leg": "false",
"origin": None,
"round_trip": "true",
"route": "Red~~Red Line~~subway",
"saved_leg": "1 - 0",
"saved_mode": "bus"
},
"_utf8": "✓",
"_csrf_token": self.csrf_token
}
)
self.csrf_token = Page.get_csrf_token(response)
@seq_task(4)
def select_times(self):
self.client.post(
"/trip",
json={
"trip": {
"alternate_routes": "%7B%221%20-%200%22:[%22701%20-%200~~Route%20CT1~~bus%22]%7D",
"bike_storage": "false",
"destinations": [None],
"elevator": "false",
"end_time": {"am_pm": "AM", "hour": "9", "minute": "0"},
"escalator": "false",
"legs": ["1 - 0"],
"modes": ["bus"],
"origins": [None],
"parking_area": "false",
"relevant_days": ["saturday", "sunday"],
"return_end_time": {"am_pm": "PM", "hour": "6", "minute": "0"},
"return_start_time": {"am_pm": "PM", "hour": "5", "minute": "0"},
"round_trip": "true",
"start_time": {"am_pm": "AM", "hour": "8", "minute": "0"}
},
"_utf8": "✓",
"_csrf_token": self.csrf_token
}
)
@seq_task(5)
def visit_trips_index_page(self):
self.client.get("/trips")
| 35.238095 | 106 | 0.430743 |
4a26f55b723ec75566ffee7337d8ade5fd46eb9b | 817 | py | Python | app/core/tests/test_commands.py | dulvinw/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | 1 | 2021-07-08T05:15:38.000Z | 2021-07-08T05:15:38.000Z | app/core/tests/test_commands.py | TMEU/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | null | null | null | app/core/tests/test_commands.py | TMEU/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | 1 | 2021-07-08T05:15:42.000Z | 2021-07-08T05:15:42.000Z | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db until db is ready"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command("wait_for_db")
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for DB"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
| 34.041667 | 74 | 0.676867 |
4a26f59f27eb36e1af3e2061e9c12a6ddb7542c5 | 5,002 | py | Python | track-gps-l1.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 68 | 2015-06-23T17:30:06.000Z | 2022-03-29T22:06:54.000Z | track-gps-l1.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 4 | 2018-03-01T05:14:36.000Z | 2021-12-05T11:07:39.000Z | track-gps-l1.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 43 | 2015-06-26T10:27:05.000Z | 2022-03-30T02:47:09.000Z | #!/usr/bin/env python
import optparse
import numpy as np
import gnsstools.gps.ca as ca
import gnsstools.nco as nco
import gnsstools.io as io
import gnsstools.discriminator as discriminator
import gnsstools.util as util
class tracking_state:
def __init__(self,fs,prn,code_p,code_f,code_i,carrier_p,carrier_f,carrier_i,mode):
self.fs = fs
self.prn = prn
self.code_p = code_p
self.code_f = code_f
self.code_i = code_i
self.carrier_p = carrier_p
self.carrier_f = carrier_f
self.carrier_i = carrier_i
self.mode = mode
self.prompt1 = 0 + 0*(1j)
self.carrier_e1 = 0
self.code_e1 = 0
self.eml = 0
self.carrier_cyc = 0
self.code_cyc = 0
# tracking loops
def track(x,s):
n = len(x)
fs = s.fs
nco.mix(x,-s.carrier_f/fs, s.carrier_p)
s.carrier_p = s.carrier_p - n*s.carrier_f/fs
t = np.mod(s.carrier_p,1)
dcyc = int(round(s.carrier_p-t))
s.carrier_cyc += dcyc
s.carrier_p = t
cf = (s.code_f+s.carrier_f/1540.0)/fs
p_early = ca.correlate(x, s.prn, 0, s.code_p-0.05, cf, ca.ca_code(prn))
p_prompt = ca.correlate(x, s.prn, 0, s.code_p, cf, ca.ca_code(prn))
p_late = ca.correlate(x, s.prn, 0, s.code_p+0.05, cf, ca.ca_code(prn))
if s.mode=='FLL_WIDE':
fll_k = 3.0
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='FLL_NARROW':
fll_k = 0.8
a = p_prompt
b = s.prompt1
e = discriminator.fll_atan(a,b)
s.carrier_f = s.carrier_f + fll_k*e
s.prompt1 = p_prompt
elif s.mode=='PLL':
pll_k1 = 0.1
pll_k2 = 3.5
e = discriminator.pll_costas(p_prompt)
e1 = s.carrier_e1
s.carrier_f = s.carrier_f + pll_k1*e + pll_k2*(e-e1)
s.carrier_e1 = e
# code loop
dll_k1 = 0.00002
dll_k2 = 0.2
s.early = np.absolute(p_early)
s.prompt = np.absolute(p_prompt)
s.late = np.absolute(p_late)
if (s.late+s.early)==0:
e = 0
else:
e = (s.late-s.early)/(s.late+s.early)
s.eml = e
e1 = s.code_e1
s.code_f = s.code_f + dll_k1*e + dll_k2*(e-e1)
s.code_e1 = e
s.code_p = s.code_p + n*cf
t = np.mod(s.code_p,ca.code_length)
dcyc = int(round(s.code_p-t))
s.code_cyc += dcyc
s.code_p = t
return p_prompt,s
#
# main program
#
parser = optparse.OptionParser(usage="""track-gps-l1.py [options] input_filename sample_rate carrier_offset PRN doppler code_offset
Track GPS L1 signal
Examples:
Track with default options:
track-gps-l1.py /dev/stdin 69984000 -9334875 31 1200.0 831.15
Track with pure PLL (no FLL intervals at the start) and with a specified carrier phase:
track-gps-l1.py --carrier-phase 0.214 /dev/stdin 69984000 -9334875 31 1200.0 831.15
Arguments:
input_filename input data file, i/q interleaved, 8 bit signed
sample_rate sampling rate in Hz
carrier_offset offset to L1 carrier in Hz (positive or negative)
PRN PRN
doppler Doppler estimate from acquisition
code_offset Code-offset estimate from acquisition""")
parser.disable_interspersed_args()
parser.add_option("--loop-dwells", default="500,500", help="initial time intervals for wide FLL, then narrow FLL, in milliseconds (default %default)")
parser.add_option("--carrier-phase", help="initial carrier phase in cycles (disables FLL: uses PLL from the start)")
(options, args) = parser.parse_args()
filename = args[0]
fs = float(args[1])
coffset = float(args[2])
prn = int(args[3])
doppler = float(args[4])
code_offset = float(args[5])
loop_dwells = util.parse_list_floats(options.loop_dwells)
carrier_p = 0.0
if options.carrier_phase is not None:
carrier_p = float(options.carrier_phase)
loop_dwells = 0,0
fll_wide_time,fll_narrow_time = loop_dwells
fp = open(filename,"rb")
n = int(fs*0.001*((ca.code_length-code_offset)/ca.code_length)) # align with 1 ms code boundary
x = io.get_samples_complex(fp,n)
code_offset += n*1000.0*ca.code_length/fs
s = tracking_state(fs=fs, prn=prn, # initialize tracking state
code_p=code_offset, code_f=ca.chip_rate, code_i=0,
carrier_p=carrier_p, carrier_f=doppler, carrier_i=0,
mode='FLL_WIDE')
block = 0
coffset_phase = 0.0
samp = 0
while True:
if block>=fll_wide_time:
s.mode = 'FLL_NARROW'
if block>=fll_wide_time+fll_narrow_time:
s.mode = 'PLL'
if s.code_p<ca.code_length/2:
n = int(fs*0.001*(ca.code_length-s.code_p)/ca.code_length)
else:
n = int(fs*0.001*(2*ca.code_length-s.code_p)/ca.code_length)
x = io.get_samples_complex(fp,n)
if x is None:
break
samp += n
nco.mix(x,-coffset/fs,coffset_phase)
coffset_phase = coffset_phase - n*coffset/fs
coffset_phase = np.mod(coffset_phase,1)
p_prompt,s = track(x,s)
vars = block, np.real(p_prompt), np.imag(p_prompt), s.carrier_f, s.code_f-ca.chip_rate, (180/np.pi)*np.angle(p_prompt), s.early, s.prompt, s.late, s.code_cyc, s.code_p, s.carrier_cyc, s.carrier_p, samp
print('%d %f %f %f %f %f %f %f %f %d %f %d %f %d' % vars)
block = block + 1
| 27.788889 | 203 | 0.681128 |
4a26f5a8b8d008fb61af5e44696deddbf15128a4 | 4,392 | py | Python | google/ads/google_ads/v3/proto/enums/conversion_attribution_event_type_pb2.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v3/proto/enums/conversion_attribution_event_type_pb2.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/proto/enums/conversion_attribution_event_type_pb2.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/enums/conversion_attribution_event_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/enums/conversion_attribution_event_type.proto',
package='google.ads.googleads.v3.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v3.enumsB#ConversionAttributionEventTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V3.Enums\312\002\035Google\\Ads\\GoogleAds\\V3\\Enums\352\002!Google::Ads::GoogleAds::V3::Enums'),
serialized_pb=_b('\nKgoogle/ads/googleads_v3/proto/enums/conversion_attribution_event_type.proto\x12\x1dgoogle.ads.googleads.v3.enums\x1a\x1cgoogle/api/annotations.proto\"\x85\x01\n\"ConversionAttributionEventTypeEnum\"_\n\x1e\x43onversionAttributionEventType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0e\n\nIMPRESSION\x10\x02\x12\x0f\n\x0bINTERACTION\x10\x03\x42\xf8\x01\n!com.google.ads.googleads.v3.enumsB#ConversionAttributionEventTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V3.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V3\\Enums\xea\x02!Google::Ads::GoogleAds::V3::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CONVERSIONATTRIBUTIONEVENTTYPEENUM_CONVERSIONATTRIBUTIONEVENTTYPE = _descriptor.EnumDescriptor(
name='ConversionAttributionEventType',
full_name='google.ads.googleads.v3.enums.ConversionAttributionEventTypeEnum.ConversionAttributionEventType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPRESSION', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERACTION', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=179,
serialized_end=274,
)
_sym_db.RegisterEnumDescriptor(_CONVERSIONATTRIBUTIONEVENTTYPEENUM_CONVERSIONATTRIBUTIONEVENTTYPE)
_CONVERSIONATTRIBUTIONEVENTTYPEENUM = _descriptor.Descriptor(
name='ConversionAttributionEventTypeEnum',
full_name='google.ads.googleads.v3.enums.ConversionAttributionEventTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CONVERSIONATTRIBUTIONEVENTTYPEENUM_CONVERSIONATTRIBUTIONEVENTTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=274,
)
_CONVERSIONATTRIBUTIONEVENTTYPEENUM_CONVERSIONATTRIBUTIONEVENTTYPE.containing_type = _CONVERSIONATTRIBUTIONEVENTTYPEENUM
DESCRIPTOR.message_types_by_name['ConversionAttributionEventTypeEnum'] = _CONVERSIONATTRIBUTIONEVENTTYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConversionAttributionEventTypeEnum = _reflection.GeneratedProtocolMessageType('ConversionAttributionEventTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _CONVERSIONATTRIBUTIONEVENTTYPEENUM,
__module__ = 'google.ads.googleads_v3.proto.enums.conversion_attribution_event_type_pb2'
,
__doc__ = """Container for enum indicating the event type the conversion is
attributed to.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.enums.ConversionAttributionEventTypeEnum)
))
_sym_db.RegisterMessage(ConversionAttributionEventTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 42.640777 | 700 | 0.806239 |
4a26f61f1742dfaa300b67797b8d7dd1684f18f2 | 5,333 | py | Python | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/test_red_variables.py | alex729/RED | 128bf203cf035892c02805aabd0c915f96006bb0 | [
"Apache-2.0"
] | 375 | 2015-11-02T19:15:30.000Z | 2022-03-19T03:32:10.000Z | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/test_red_variables.py | alex729/RED | 128bf203cf035892c02805aabd0c915f96006bb0 | [
"Apache-2.0"
] | 433 | 2015-11-03T13:24:40.000Z | 2022-03-30T11:20:14.000Z | src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/test_red_variables.py | alex729/RED | 128bf203cf035892c02805aabd0c915f96006bb0 | [
"Apache-2.0"
] | 133 | 2016-05-02T02:20:06.000Z | 2022-01-06T06:01:28.000Z | import unittest
import os
from red_variables import get_variables
from red_variables import get_global_variables
from robot.errors import DataError
class VariablesRetrievingTests(unittest.TestCase):
def test_if_global_variables_are_returned(self):
result = get_global_variables()
self.assertEqual(set(result.keys()),
set(['${/}', '${:}', '${CURDIR}', '${DEBUG_FILE}', '${EMPTY}', '${EXECDIR}', '${False}',
'${KEYWORD_MESSAGE}', '${KEYWORD_STATUS}', '${LOG_FILE}', '${LOG_LEVEL}', '${None}',
'${OUTPUT_DIR}', '${OUTPUT_FILE}', '${PREV_TEST_MESSAGE}', '${PREV_TEST_NAME}',
'${PREV_TEST_STATUS}', '${REPORT_FILE}', '${SPACE}', '${SUITE_DOCUMENTATION}',
'${SUITE_MESSAGE}', '${SUITE_NAME}', '${SUITE_SOURCE}', '${SUITE_STATUS}',
'${SUMMARY_FILE}', '${TEMPDIR}', '${TEST_DOCUMENTATION}', '${TEST_MESSAGE}',
'${TEST_NAME}', '${TEST_STATUS}', '${True}', '${\\n}', '${null}', '&{EMPTY}',
'&{SUITE_METADATA}', '@{EMPTY}', '@{TEST_TAGS}']))
def test_if_empty_result_is_returned_for_empty_file(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'empty.py')
result = get_variables(vars_location, [])
self.assertEqual(result, {})
def test_if_result_is_returned_for_vars_in_lines(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_in_lines.py')
result = get_variables(vars_location, [])
self.assertEqual(result, {'first': '123', 'second': '234', 'third': '345'})
def test_if_result_is_returned_for_vars_in_method(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_in_method.py')
result = get_variables(vars_location, [])
self.assertEqual(result, {'a': '1', 'b': '2', 'c': '3'})
def test_if_result_is_returned_for_vars_in_class(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_in_class.py')
result = get_variables(vars_location, [])
self.assertEqual(result, {'x': '9', 'y': '8', 'z': '7'})
def test_if_result_is_returned_for_vars_with_argument(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_with_argument.py')
result = get_variables(vars_location, ['_arg'])
self.assertEqual(result, {'a': '1_arg', 'b': '2_arg', 'c': '3_arg'})
def test_if_result_is_returned_for_vars_with_proper_casing(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_with_case_sensitive_names.py')
result = get_variables(vars_location, [])
self.assertEqual(result, {'first': '123', 'SECOND': '234', 'ThIrD': '345', 'FORTH_with_UnderSCORES' : '456'})
def test_if_result_is_returned_with_defined_classes_visible_as_variables(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_class_as_var.py')
result = get_variables(vars_location, [])
self.assertSetEqual(set(result.keys()), set(['SomeClass']))
def test_if_data_error_is_raised_for_file_without_arguments(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_with_argument.py')
with self.assertRaises(DataError) as cm:
get_variables(vars_location, [])
self.assertTrue('TypeError: ' in cm.exception.message)
def test_if_data_error_is_raised_for_not_existing_file(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'not_existing.py')
with self.assertRaises(DataError) as cm:
get_variables(vars_location, [])
self.assertTrue('File or directory does not exist' in cm.exception.message)
def test_if_data_error_is_raised_for_file_with_unsupported_extension(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_in_unsupported.robot')
with self.assertRaises(DataError) as cm:
get_variables(vars_location, [])
self.assertTrue('Not a valid file or directory to import' in cm.exception.message)
def test_if_data_error_is_raised_for_file_with_error(self):
parent_path = os.path.dirname(os.path.realpath(__file__))
vars_location = os.path.join(parent_path, 'res_test_red_variables', 'vars_with_syntax.py')
with self.assertRaises(DataError) as cm:
get_variables(vars_location, [])
self.assertTrue('SyntaxError: ' in cm.exception.message)
| 46.780702 | 117 | 0.663979 |
4a26f70def3b16570b4ec65b48b67c354c27cee1 | 8,581 | py | Python | tests/repositories/test_installed_repository.py | sivanbecker/poetry | 72497bcb66b5a1cc20e3aa95973c523a22b05bfa | [
"MIT"
] | null | null | null | tests/repositories/test_installed_repository.py | sivanbecker/poetry | 72497bcb66b5a1cc20e3aa95973c523a22b05bfa | [
"MIT"
] | null | null | null | tests/repositories/test_installed_repository.py | sivanbecker/poetry | 72497bcb66b5a1cc20e3aa95973c523a22b05bfa | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import Optional
import pytest
from poetry.repositories.installed_repository import InstalledRepository
from poetry.utils._compat import metadata
from poetry.utils.env import MockEnv as BaseMockEnv
from tests.compat import zipp
if TYPE_CHECKING:
from pytest_mock.plugin import MockerFixture
from poetry.core.packages.package import Package
FIXTURES_DIR = Path(__file__).parent / "fixtures"
ENV_DIR = (FIXTURES_DIR / "installed").resolve()
SITE_PURELIB = ENV_DIR / "lib" / "python3.7" / "site-packages"
SITE_PLATLIB = ENV_DIR / "lib64" / "python3.7" / "site-packages"
SRC = ENV_DIR / "src"
VENDOR_DIR = ENV_DIR / "vendor" / "py3.7"
INSTALLED_RESULTS = [
metadata.PathDistribution(SITE_PURELIB / "cleo-0.7.6.dist-info"),
metadata.PathDistribution(SRC / "pendulum" / "pendulum.egg-info"),
metadata.PathDistribution(
zipp.Path(str(SITE_PURELIB / "foo-0.1.0-py3.8.egg"), "EGG-INFO")
),
metadata.PathDistribution(VENDOR_DIR / "attrs-19.3.0.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "standard-1.2.3.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "editable-2.3.4.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "editable-with-import-2.3.4.dist-info"),
metadata.PathDistribution(SITE_PLATLIB / "lib64-2.3.4.dist-info"),
metadata.PathDistribution(SITE_PLATLIB / "bender-2.0.5.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "git_pep_610-1.2.3.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "url_pep_610-1.2.3.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "file_pep_610-1.2.3.dist-info"),
metadata.PathDistribution(SITE_PURELIB / "directory_pep_610-1.2.3.dist-info"),
metadata.PathDistribution(
SITE_PURELIB / "editable_directory_pep_610-1.2.3.dist-info"
),
]
class MockEnv(BaseMockEnv):
@property
def paths(self) -> Dict[str, Path]:
return {
"purelib": SITE_PURELIB,
"platlib": SITE_PLATLIB,
}
@property
def sys_path(self) -> List[Path]:
return [ENV_DIR, SITE_PLATLIB, SITE_PURELIB]
@pytest.fixture
def env() -> MockEnv:
return MockEnv(path=ENV_DIR)
@pytest.fixture
def repository(mocker: "MockerFixture", env: MockEnv) -> InstalledRepository:
mocker.patch(
"poetry.utils._compat.metadata.Distribution.discover",
return_value=INSTALLED_RESULTS,
)
mocker.patch(
"poetry.core.vcs.git.Git.rev_parse",
return_value="bb058f6b78b2d28ef5d9a5e759cfa179a1a713d6",
)
mocker.patch(
"poetry.core.vcs.git.Git.remote_urls",
side_effect=[
{"remote.origin.url": "https://github.com/sdispater/pendulum.git"},
{"remote.origin.url": "[email protected]:sdispater/pendulum.git"},
],
)
mocker.patch("poetry.repositories.installed_repository._VENDORS", str(VENDOR_DIR))
return InstalledRepository.load(env)
def get_package_from_repository(
name: str, repository: InstalledRepository
) -> Optional["Package"]:
for pkg in repository.packages:
if pkg.name == name:
return pkg
return None
def test_load_successful(repository: InstalledRepository):
assert len(repository.packages) == len(INSTALLED_RESULTS) - 1
def test_load_ensure_isolation(repository: InstalledRepository):
package = get_package_from_repository("attrs", repository)
assert package is None
def test_load_standard_package(repository: InstalledRepository):
cleo = get_package_from_repository("cleo", repository)
assert cleo is not None
assert cleo.name == "cleo"
assert cleo.version.text == "0.7.6"
assert (
cleo.description
== "Cleo allows you to create beautiful and testable command-line interfaces."
)
foo = get_package_from_repository("foo", repository)
assert foo is not None
assert foo.version.text == "0.1.0"
def test_load_git_package(repository: InstalledRepository):
pendulum = get_package_from_repository("pendulum", repository)
assert pendulum is not None
assert pendulum.name == "pendulum"
assert pendulum.version.text == "2.0.5"
assert pendulum.description == "Python datetimes made easy"
assert pendulum.source_type == "git"
assert pendulum.source_url in [
"[email protected]:sdispater/pendulum.git",
"https://github.com/sdispater/pendulum.git",
]
assert pendulum.source_reference == "bb058f6b78b2d28ef5d9a5e759cfa179a1a713d6"
def test_load_git_package_pth(repository: InstalledRepository):
bender = get_package_from_repository("bender", repository)
assert bender is not None
assert bender.name == "bender"
assert bender.version.text == "2.0.5"
assert bender.source_type == "git"
def test_load_platlib_package(repository: InstalledRepository):
lib64 = get_package_from_repository("lib64", repository)
assert lib64 is not None
assert lib64.name == "lib64"
assert lib64.version.text == "2.3.4"
def test_load_editable_package(repository: InstalledRepository):
# test editable package with text .pth file
editable = get_package_from_repository("editable", repository)
assert editable is not None
assert editable.name == "editable"
assert editable.version.text == "2.3.4"
assert editable.source_type == "directory"
assert (
editable.source_url
== Path("/path/to/editable").resolve(strict=False).as_posix()
)
def test_load_editable_with_import_package(repository: InstalledRepository):
# test editable package with executable .pth file
editable = get_package_from_repository("editable-with-import", repository)
assert editable is not None
assert editable.name == "editable-with-import"
assert editable.version.text == "2.3.4"
assert editable.source_type is None
assert editable.source_url is None
def test_load_standard_package_with_pth_file(repository: InstalledRepository):
# test standard packages with .pth file is not treated as editable
standard = get_package_from_repository("standard", repository)
assert standard is not None
assert standard.name == "standard"
assert standard.version.text == "1.2.3"
assert standard.source_type is None
assert standard.source_url is None
def test_load_pep_610_compliant_git_packages(repository: InstalledRepository):
package = get_package_from_repository("git-pep-610", repository)
assert package is not None
assert package.name == "git-pep-610"
assert package.version.text == "1.2.3"
assert package.source_type == "git"
assert package.source_url == "https://github.com/demo/git-pep-610.git"
assert package.source_reference == "my-branch"
assert package.source_resolved_reference == "123456"
def test_load_pep_610_compliant_url_packages(repository: InstalledRepository):
package = get_package_from_repository("url-pep-610", repository)
assert package is not None
assert package.name == "url-pep-610"
assert package.version.text == "1.2.3"
assert package.source_type == "url"
assert (
package.source_url
== "https://python-poetry.org/distributions/url-pep-610-1.2.3.tar.gz"
)
def test_load_pep_610_compliant_file_packages(repository: InstalledRepository):
package = get_package_from_repository("file-pep-610", repository)
assert package is not None
assert package.name == "file-pep-610"
assert package.version.text == "1.2.3"
assert package.source_type == "file"
assert package.source_url == "/path/to/distributions/file-pep-610-1.2.3.tar.gz"
def test_load_pep_610_compliant_directory_packages(repository: InstalledRepository):
package = get_package_from_repository("directory-pep-610", repository)
assert package is not None
assert package.name == "directory-pep-610"
assert package.version.text == "1.2.3"
assert package.source_type == "directory"
assert package.source_url == "/path/to/distributions/directory-pep-610"
assert not package.develop
def test_load_pep_610_compliant_editable_directory_packages(
repository: InstalledRepository,
):
package = get_package_from_repository("editable-directory-pep-610", repository)
assert package is not None
assert package.name == "editable-directory-pep-610"
assert package.version.text == "1.2.3"
assert package.source_type == "directory"
assert package.source_url == "/path/to/distributions/directory-pep-610"
assert package.develop
| 35.903766 | 86 | 0.725207 |
4a26f80e01a9b6dc8ff701a28d28cc2eea97b737 | 768 | py | Python | thinc/tests/regression/issue519/test_issue519.py | TheVinhLuong102/thinc | 7b54f728ddec7765a1d8a5e553d4b4b90b9edaec | [
"MIT"
] | 2,542 | 2016-10-20T07:02:59.000Z | 2022-03-30T20:18:35.000Z | thinc/tests/regression/issue519/test_issue519.py | TheVinhLuong102/thinc | 7b54f728ddec7765a1d8a5e553d4b4b90b9edaec | [
"MIT"
] | 453 | 2016-10-19T21:09:35.000Z | 2022-03-31T11:01:15.000Z | thinc/tests/regression/issue519/test_issue519.py | TheVinhLuong102/thinc | 7b54f728ddec7765a1d8a5e553d4b4b90b9edaec | [
"MIT"
] | 265 | 2016-11-14T14:53:58.000Z | 2022-03-31T02:25:24.000Z | import subprocess
try:
import importlib.resources as importlib_resources
except ImportError:
import importlib_resources # type: ignore
import pytest
@pytest.mark.slow
def test_issue519():
"""
Test ability of Thinc mypy plugin to handle variadic arguments.
This test can take up to 45 seconds, and is thus marked as slow.
"""
# Determine the name of the parent module (which contains the test program)
parent_module_name = __name__[:__name__.rfind(".")]
# Load test program that calls a Thinc API with variadic arguments
program_text = importlib_resources.read_text(parent_module_name, "program.py")
# Ask Mypy to type-check the loaded program text
subprocess.run(["mypy", "--command", program_text], check=True)
| 29.538462 | 82 | 0.733073 |
4a26f86bce83518e28ba8ec427802d6ae5252a84 | 1,315 | py | Python | girder_utils/templatetags/girder_utils.py | girder/django-girder-utils | 587919e4c7b48b67624be40df3389d02380c69bb | [
"Apache-2.0"
] | 2 | 2020-10-12T18:46:54.000Z | 2021-01-28T21:02:24.000Z | girder_utils/templatetags/girder_utils.py | girder/django-girder-utils | 587919e4c7b48b67624be40df3389d02380c69bb | [
"Apache-2.0"
] | 12 | 2020-12-01T20:13:31.000Z | 2021-12-19T02:32:05.000Z | girder_utils/templatetags/girder_utils.py | girder/django-girder-utils | 587919e4c7b48b67624be40df3389d02380c69bb | [
"Apache-2.0"
] | 1 | 2021-01-04T22:26:47.000Z | 2021-01-04T22:26:47.000Z | import json
from typing import Any, Mapping, Optional, TypeVar
from django import template
register = template.Library()
_KT = TypeVar('_KT')
_VT_co = TypeVar('_VT_co', covariant=True)
@register.filter
def getitem(value: Mapping[_KT, _VT_co], arg: _KT) -> Optional[_VT_co]:
"""
Retrieve `value[arg]` from a mapping `value`, where `arg` can be a variable.
This will return `None` if `arg` is not found. It may be useful to chain the `default_if_none`
filter after this one.
Sample usage::
{% load girder_utils %}
{% for key in some_keys %}
{{ my_dict|getitem:key }}
{% endfor %}
"""
return value.get(arg, None)
@register.filter
def pretty_json(value: Any, indent: Optional[int] = None) -> str:
"""
Convert `value` to a JSON-formatted string.
Optionally, `indent` can be specified as an positive integer number of spaces to pretty-print
indentation with; `None` (the default) will disable pretty-printing.
The output should typically be embedded within HTML an `<pre>` element.
If `indent` is specified, the output will likely contain newlines, which `<pre>` will render.
Sample usage::
{% load girder_utils %}
<pre>{{ my_object|pretty_json:4 }}</pre>
"""
return json.dumps(value, indent=indent)
| 28.586957 | 98 | 0.66692 |
4a26f8950bd69b6143fa438d7ff6e3326a3a28a8 | 11,438 | py | Python | cogs/extra.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | cogs/extra.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | cogs/extra.py | SilentSerenityy/JDBot | cd404000f06d51757439e435b2aaedbbab233144 | [
"MIT"
] | null | null | null | from discord.ext import commands
import discord, random, asuna_api, math, aiohttp, io, chardet, aiogtts, mystbin, alexflipnote, os
class Extra(commands.Cog):
def __init__(self,client):
self.client = client
@commands.command(brief="a way to look up minecraft usernames",help="using the official minecraft api, looking up minecraft information has never been easier(tis only gives minecraft account history relating to name changes)")
async def mchistory(self,ctx,*,args=None):
if args is None:
await ctx.send("Please pick a minecraft user.")
if args:
asuna = asuna_api.Client(self.client.session)
minecraft_info=await asuna.mc_user(args)
embed=discord.Embed(title=f"Minecraft Username: {args}",color=random.randint(0, 16777215))
embed.set_footer(text=f"Minecraft UUID: {minecraft_info.uuid}")
embed.add_field(name="Orginal Name:",value=minecraft_info.name)
y = 0
for x in minecraft_info.history:
if y > 0:
embed.add_field(name=f"Username:\n{x['name']}",value=f"Date Changed:\n{x['changedToAt']}\n \nTime Changed: \n {x['timeChangedAt']}")
y = y + 1
embed.set_author(name=f"Requested by {ctx.author}",icon_url=(ctx.author.avatar_url))
await ctx.send(embed=embed)
@commands.command(help="This gives random history using Sp46's api.",brief="a command that uses SP46's api's random history command to give you random history responses")
async def random_history(self,ctx,*,args=None):
if args is None:
args = 1
asuna = asuna_api.Client(self.client.session)
response = await asuna.random_history(args)
for x in response:
await ctx.send(f":earth_africa: {x}")
@commands.command(brief="gives you the digits of pi that Python knows")
async def pi(self,ctx):
await ctx.send(math.pi)
@commands.command(brief="reverses text")
async def reverse(self,ctx,*,args=None):
if args:
await ctx.send(args[::-1])
if args is None:
await ctx.send("Try sending actual to reverse")
@commands.command(brief="Oh no Dad Jokes, AHHHHHH!")
async def dadjoke(self,ctx):
response=await self.client.session.get("https://icanhazdadjoke.com/",headers={"Accept": "application/json"})
joke=await response.json()
embed = discord.Embed(title="Random Dad Joke:",color=random.randint(0, 16777215))
embed.set_author(name=f"Dad Joke Requested by {ctx.author}",icon_url=(ctx.author.avatar_url))
embed.add_field(name="Dad Joke:",value=joke["joke"])
embed.set_footer(text=f"View here:\n https://icanhazdadjoke.com/j/{joke['id']}")
await ctx.send(embed=embed)
@commands.command(brief="gets a panel from the xkcd comic",aliases=["astrojoke","astro_joke"])
async def xkcd(self,ctx):
response=await self.client.session.get("https://xkcd.com/info.0.json")
info=await response.json()
num = random.randint(1,info["num"])
comic = await self.client.session.get(f"https://xkcd.com/{num}/info.0.json")
data=await comic.json()
title = data["title"]
embed=discord.Embed(title=f"Title: {title}",color=random.randint(0, 16777215))
embed.set_image(url=data["img"])
embed.set_footer(text=f"Made on {data['month']}/{data['day']}/{data['year']}")
await ctx.send(embed=embed)
@commands.command()
async def http_cat(self,ctx,args=None):
if args is None:
code = "404"
elif args.isdigit() and 99 < int(args) < 600:
code = args
else:
# non-numeric or out-of-range values fall back to the default 404 cat
await ctx.send("Not a valid arg, using 404")
code = "404"
response = await self.client.session.get(f"https://http.cat/{code}")
if response.status:
image = f"https://http.cat/{code}.jpg"
embed=discord.Embed(title=f"Status Code: {code}",color=random.randint(0, 16777215))
embed.set_author(name=f"Requested by {ctx.author}",icon_url=ctx.author.avatar_url)
embed.set_image(url=image)
embed.set_footer(text="Powered by http.cat")
await ctx.send(embed=embed)
@commands.command(help="Gives advice from JDJG api.",aliases=["ad"])
async def advice(self,ctx):
r=await self.client.session.get('https://jdjgapi.nom.mu/api/advice')
res = await r.json()
embed = discord.Embed(title = "Here is some advice for you!",color=random.randint(0, 16777215))
embed.add_field(name = f"{res['text']}", value = "Hopefully this helped!")
embed.set_footer(text="Powered by JDJG Api!")
try:
await ctx.send(embed=embed)
except:
await ctx.send("was too long...")
@commands.command(help="gives random compliment")
async def compliment(self,ctx):
r=await self.client.session.get('https://jdjgapi.nom.mu/api/compliment')
res = await r.json()
embed = discord.Embed(title = "Here is a compliment:",color=random.randint(0, 16777215))
embed.add_field(name = f"{res['text']}", value = "Hopefully this helped your day!")
embed.set_footer(text="Powered by JDJG Api!")
await ctx.send(embed=embed)
@commands.command(help="gives an insult")
async def insult(self,ctx):
r=await self.client.session.get('https://jdjgapi.nom.mu/api/insult')
res = await r.json()
embed = discord.Embed(title = "Here is a insult:",color=random.randint(0, 16777215))
embed.add_field(name = f"{res['text']}", value = "Hopefully this Helped?")
embed.set_footer(text="Powered by JDJG Api!")
await ctx.send(embed=embed)
@commands.command(help="gives response to slur")
async def noslur(self,ctx):
r=await self.client.session.get('https://jdjgapi.nom.mu/api/noslur')
res = await r.json()
embed = discord.Embed(title = "Don't Swear",color=random.randint(0, 16777215))
embed.add_field(name = f"{res['text']}", value = "WHY MUST YOU SWEAR?")
embed.set_footer(text="Powered by JDJG Api!")
await ctx.send(embed=embed)
@commands.command(help="gives random message",aliases=["rm"])
async def random_message(self,ctx):
r=await self.client.session.get('https://jdjgapi.nom.mu/api/randomMessage')
res = await r.json()
embed = discord.Embed(title = "Random Message:",color=random.randint(0, 16777215))
embed.add_field(name="Here:",value=res["text"])
embed.set_footer(text="Powered by JDJG Api!")
await ctx.send(embed=embed)
async def google_tts(self,ctx,text):
await ctx.send("if you have a lot of text it may take a bit")
mp3_fp = io.BytesIO()
tts=aiogtts.aiogTTS()
await tts.write_to_fp(text,mp3_fp,lang='en')
mp3_fp.seek(0)
file = discord.File(mp3_fp,"tts.mp3")
await ctx.send(file=file)
@commands.command(help="a command to talk to Google TTS",brief="using the power of the GTTS module you can now do tts")
async def tts(self,ctx,*,args=None):
if args:
await self.google_tts(ctx,args)
if ctx.message.attachments:
for x in ctx.message.attachments:
file=await x.read()
if len(file) > 0:
encoding=chardet.detect(file)["encoding"]
if encoding:
text = file.decode(encoding)
await self.google_tts(ctx,text)
if encoding is None:
await ctx.send("it looks like it couldn't decode this file, if this is an issue DM JDJG Inc. Official#3439")
if len(file) < 1:
await ctx.send("this doesn't contain any bytes.")
if args is None and len(ctx.message.attachments) < 1:
await ctx.send("You didn't specify any value.")
@commands.command(help="learn about a secret custom xbox controller",brief="this will give you a message of JDJG's classic wanted xbox design.")
async def secret_controller(self,ctx):
embed = discord.Embed(color=random.randint(0, 16777215))
embed.set_author(name="Secret Xbox Image:")
embed.add_field(name="Body:",value="Zest Orange")
embed.add_field(name="Back:",value="Zest Orange")
embed.add_field(name="Bumpers:",value="Zest Orange")
embed.add_field(name="Triggers:",value="Zest Orange")
embed.add_field(name="D-pad:",value="Electric Green")
embed.add_field(name="Thumbsticks:",value="Electric Green")
embed.add_field(name="ABXY:",value="Colors on Black")
embed.add_field(name="View & Menu:",value="White on Black")
embed.add_field(name="Engraving(not suggested):",value="JDJG Inc.")
embed.add_field(name="Disclaimer:",value="I do not work at microsoft,or suggest you buy this I just wanted a place to represent a controller that I designed a while back.")
embed.set_image(url="https://i.imgur.com/QCh4M2W.png")
embed.set_footer(text="This is Xbox's custom controller design that I picked for myself.\nXbox is owned by Microsoft. I don't own the image")
await ctx.send(embed=embed)
@commands.command(brief="repeats what you say",help="a command that repeats what you say the orginal message is deleted")
async def say(self,ctx,*,args=None):
if args:
args = discord.utils.escape_mentions(args)
args=discord.utils.escape_markdown(args,as_needed=False,ignore_links=False)
try:
await ctx.message.delete()
except discord.errors.Forbidden:
pass
await ctx.send(args)
if args is None:
await ctx.send("You didn't give us any text to use.")
@commands.command(brief="a command to backup text",help="please don't upload any private files that aren't meant to be seen")
async def text_backup(self,ctx):
if ctx.message.attachments:
for x in ctx.message.attachments:
file=await x.read()
if len(file) > 0:
encoding=chardet.detect(file)["encoding"]
if encoding:
text = file.decode(encoding)
mystbin_client = mystbin.Client(session=self.client.session)
paste = await mystbin_client.post(text)
await ctx.send(content=f"Added text file to mystbin: \n{paste.url}")
if encoding is None:
await ctx.send("it looks like it couldn't decode this file, if this is an issue DM JDJG Inc. Official#3439 or it wasn't a text file.")
if len(file) < 1:
await ctx.send("this doesn't contain any bytes.")
@commands.group(name="apply",invoke_without_command=True)
async def apply(self,ctx):
await ctx.send("this command is meant to apply")
@apply.command(brief="a command to apply for our Bloopers.",help="a command to apply for our bloopers.")
async def bloopers(self,ctx,*,args=None):
if args is None:
await ctx.send("You didn't give us any info.")
if args:
if isinstance(ctx.message.channel, discord.TextChannel):
await ctx.message.delete()
for x in [708167737381486614,168422909482762240]:
apply_user = self.client.get_user(x)
if (apply_user.dm_channel is None):
await apply_user.create_dm()
embed_message = discord.Embed(title=args,color=random.randint(0, 16777215),timestamp=(ctx.message.created_at))
embed_message.set_author(name=f"Application from {ctx.author}",icon_url=(ctx.author.avatar_url))
embed_message.set_footer(text = f"{ctx.author.id}")
embed_message.set_thumbnail(url="https://i.imgur.com/PfWlEd5.png")
await apply_user.send(embed=embed_message)
@commands.command()
async def caw(self,ctx):
alex_api = alexflipnote.Client(os.environ["alex_apikey"],session=self.client.session)
url=await alex_api.birb()
await ctx.send(url)
def setup(client):
client.add_cog(Extra(client))
| 44.162162 | 228 | 0.678703 |
4a26f9a975958731b04bde1c5702cbe1de1fac78 | 74 | py | Python | Numpy-Arrays.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | Numpy-Arrays.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | Numpy-Arrays.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | import numpy as np
test_1 = np.array([92,94,88,91,87])
print(test_1) | 14.8 | 36 | 0.662162 |
4a26fba4d46ab33071ae8c09e60058986522f1ad | 1,523 | py | Python | pylearn2/scripts/papers/jia_huang_wkshp_11/assemble.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 2,045 | 2015-01-01T14:07:52.000Z | 2022-03-08T08:56:41.000Z | pylearn2/scripts/papers/jia_huang_wkshp_11/assemble.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 305 | 2015-01-02T13:18:24.000Z | 2021-08-20T18:03:28.000Z | pylearn2/scripts/papers/jia_huang_wkshp_11/assemble.py | ikervazquezlopez/Pylearn2 | 2971e8f64374ffde572d4cf967aad5342beaf5e0 | [
"BSD-3-Clause"
] | 976 | 2015-01-01T17:08:51.000Z | 2022-03-25T19:53:17.000Z | from __future__ import print_function
import numpy as np
import os
from theano.compat.six.moves import xrange
#check that the right files are present
names = os.listdir('.')
if 'features.npy' in names:
print("Not doing anything, features.npy already exists.")
quit(0)
chunk_names = [ 'features_A.npy',
'features_B.npy',
'features_C.npy',
'features_D.npy',
'features_E.npy' ]
for name in chunk_names:
assert name in names
for name in chunk_names:
if name.startswith('features') and name.endswith('.npy'):
if name not in chunk_names:
print("I'm not sure what to do with "+name)
print("The existence of this file makes me think extract_features.yaml has changed")
print("I don't want to do something incorrect so I'm going to give up.")
quit(-1)
#Do the conversion
print('loading '+chunk_names[0])
first_chunk = np.load(chunk_names[0])
final_shape = list(first_chunk.shape)
final_shape[0] = 50000
print('making output')
X = np.zeros(final_shape,dtype='float32')
idx = first_chunk.shape[0]
X[0:idx,:] = first_chunk
for i in xrange(1, len(chunk_names)):
arg = chunk_names[i]
print('loading '+arg)
chunk = np.load(arg)
chunk_span = chunk.shape[0]
X[idx:idx+chunk_span,...] = chunk
idx += chunk_span
print("Saving features.npy...")
np.save('features.npy',X)
print("Deleting the chunks...")
for chunk_name in chunk_names:
os.remove(chunk_name)
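# Illustrative sketch only (not part of the original script): the preallocate-and-fill
# loop above could also be written with np.concatenate, at the cost of holding every
# chunk in memory alongside the final array. It assumes the same chunk_names list and
# would have to run before the chunks are deleted.
#
# def assemble_with_concatenate(names):
#     chunks = [np.load(name) for name in names]            # load each chunk
#     X = np.concatenate(chunks, axis=0).astype('float32')  # stack along the first axis
#     assert X.shape[0] == 50000                            # same final row count
#     return X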
| 22.397059 | 96 | 0.653972 |
4a26fc58ffddcf5f99e402731e986a9079a16d03 | 170 | py | Python | ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/msc.py | felipeescallon/mision_tic_2022 | 20496fc40b18d2e98114d6362928f34fde41aaae | [
"CC0-1.0"
] | 7 | 2021-07-05T21:25:50.000Z | 2021-11-09T11:09:41.000Z | ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/msc.py | felipeescallon/mision_tic_2022 | 20496fc40b18d2e98114d6362928f34fde41aaae | [
"CC0-1.0"
] | null | null | null | ciclo1_python/udea/MisionTIC_UdeA_Ciclo1/Material/Semana_7/Semana 7/msc.py | felipeescallon/mision_tic_2022 | 20496fc40b18d2e98114d6362928f34fde41aaae | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 11:15:12 2021
@author: USUARIO
"""
import os
dta = os.getcwd()
Rutaab = os.path.realpath(__file__)  # os.path.realpath, not os.dta: resolve this script's absolute path
print (Rutaab) | 15.454545 | 36 | 0.629412 |
4a26fd5fed52f52f3c649b64e011bf8ef79c968a | 30,313 | py | Python | tests/conftest.py | nismod/onion | 950bb5ee7bff929277133a6e82ce0d8e4ce2542b | [
"MIT"
] | 2 | 2019-02-12T15:33:04.000Z | 2019-02-13T13:51:17.000Z | tests/conftest.py | nismod/onion | 950bb5ee7bff929277133a6e82ce0d8e4ce2542b | [
"MIT"
] | 1 | 2019-02-14T10:24:38.000Z | 2019-02-14T10:24:38.000Z | tests/conftest.py | nismod/onion | 950bb5ee7bff929277133a6e82ce0d8e4ce2542b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for onion.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
import pytest
@pytest.fixture()
def geojson_no_substation():
data = \
{
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "APF",
"pax": 6562
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24262902639089,
51.6389953266527
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "CHO",
"pax": 271850
},
"geometry": {
"type": "Point",
"coordinates": [
-1.15824245298617,
51.5700313809884
]
}
}
]
}
return data
@pytest.fixture()
def geojson():
data = \
{
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"type": "substation",
"operating_voltage": "400kV",
"name": "MOULSFORD DOWN 400KV SUBSTATION"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.16028744986759,
51.5351050684567
]
}
},
{
"type": "Feature",
"properties": {
"type": "substation",
"operating_voltage": "400kV",
"name": "CULHAM JET 400KV SUBSTATION"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.22835338989177,
51.6611686319984
]
}
},
{
"type": "Feature",
"properties": {
"type": "substation",
"operating_voltage": "400kV",
"name": "DIDCOT 400KV SUBSTATION"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.25934538640586,
51.6255544962863
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "APF",
"pax": 6562
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24262902639089,
51.6389953266527
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "CHO",
"pax": 271850
},
"geometry": {
"type": "Point",
"coordinates": [
-1.15824245298617,
51.5700313809884
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "DID",
"pax": 3185928
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24252767003678,
51.6112380133509
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "RAD",
"pax": 141792
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24027394922877,
51.6856723771796
]
}
},
{
"type": "Feature",
"properties": {
"type": "railwaystation",
"code": "CUM",
"pax": 83908
},
"geometry": {
"type": "Point",
"coordinates": [
-1.23655176310231,
51.6541065120975
]
}
},
{
"type": "Feature",
"properties": {
"name": "Grim's Ditch; section 600yds (550m) long on East Ginge Down",
"type": "monument"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.35711307254206,
51.5661547905162
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A34",
"traffic": 54439,
"length": 4.6
},
"geometry": {
"type": "Point",
"coordinates": [
-1.2892109818117,
51.6837622099323
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A329",
"traffic": 3345,
"length": 7.1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.13467728496191,
51.6556706308269
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 19149,
"length": 3.2
},
"geometry": {
"type": "Point",
"coordinates": [
-1.2806100994504,
51.6167284821114
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 17856,
"length": 4.8
},
"geometry": {
"type": "Point",
"coordinates": [
-1.1207516332245,
51.613307190284
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A34",
"traffic": 48111,
"length": 6.2
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30043835995313,
51.5952687707253
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 14424,
"length": 6.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24534907877791,
51.6636087954115
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 14875,
"length": 2.1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.18382921973757,
51.6672212045913
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 10892,
"length": 2.9
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30684035471683,
51.6090636612318
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 17857,
"length": 0.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.28152109832397,
51.6715411557998
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 27740,
"length": 1.8
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29801515902622,
51.6695683429367
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 17849,
"length": 4.9
},
"geometry": {
"type": "Point",
"coordinates": [
-1.20829873037113,
51.694362780306
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4183",
"traffic": 18705,
"length": 1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.26918111978116,
51.6943165705608
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 9314,
"length": 9.2
},
"geometry": {
"type": "Point",
"coordinates": [
-1.17877314103447,
51.5973312387089
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A329",
"traffic": 6865,
"length": 5.8
},
"geometry": {
"type": "Point",
"coordinates": [
-1.13839914671395,
51.5641327865591
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 7587,
"length": 2.5
},
"geometry": {
"type": "Point",
"coordinates": [
-1.19278305240772,
51.6580758499934
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4185",
"traffic": 10001,
"length": 3.4
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30409210625312,
51.5857325425876
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A417",
"traffic": 11512,
"length": 7.2
},
"geometry": {
"type": "Point",
"coordinates": [
-1.3604697222294,
51.5949546630729
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A34",
"traffic": 58188,
"length": 5.9
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30688917633811,
51.6437933042357
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 12092,
"length": 3.8
},
"geometry": {
"type": "Point",
"coordinates": [
-1.33672034162554,
51.669384089834
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A417",
"traffic": 4443,
"length": 13.5
},
"geometry": {
"type": "Point",
"coordinates": [
-1.23387514581252,
51.568436279247
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 18534,
"length": 4.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.15692787879399,
51.6445930974072
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 14810,
"length": 0.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.28324879440082,
51.670189515777
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A34",
"traffic": 45394,
"length": 2.1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29797963216915,
51.5627678276496
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A417",
"traffic": 4443,
"length": 1.5
},
"geometry": {
"type": "Point",
"coordinates": [
-1.15329139732202,
51.5312868903388
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4185",
"traffic": 10001,
"length": 0.1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29704629058976,
51.5718436707401
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4183",
"traffic": 10570,
"length": 2.2
},
"geometry": {
"type": "Point",
"coordinates": [
-1.27626539149497,
51.6814758207361
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 10338,
"length": 0.9
},
"geometry": {
"type": "Point",
"coordinates": [
-1.22609756716941,
51.610550961041
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 11629,
"length": 3.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24119571523403,
51.6144708576188
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 13359,
"length": 1.3
},
"geometry": {
"type": "Point",
"coordinates": [
-1.12041694600567,
51.5876472976218
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A415",
"traffic": 17932,
"length": 0.4
},
"geometry": {
"type": "Point",
"coordinates": [
-1.28370531434468,
51.671500542698
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A34",
"traffic": 55022,
"length": 11.8
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30423213044639,
51.5061595600564
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4130",
"traffic": 9782,
"length": 8.7
},
"geometry": {
"type": "Point",
"coordinates": [
-1.04654527847476,
51.5861265187696
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 8140,
"length": 14.7
},
"geometry": {
"type": "Point",
"coordinates": [
-1.05465715207522,
51.5360187312564
]
}
},
{
"type": "Feature",
"properties": {
"type": "road",
"roadnumber": "A4074",
"traffic": 18324,
"length": 1.1
},
"geometry": {
"type": "Point",
"coordinates": [
-1.10733820652602,
51.5921342917369
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": ""
},
"geometry": {
"type": "Point",
"coordinates": [
-1.26810891166814,
51.6328804155471
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "National\r\nPower Plc"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24367383461684,
51.6754029312736
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": ""
},
"geometry": {
"type": "Point",
"coordinates": [
-1.27581375970588,
51.6734608951868
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC Limited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.31024382134045,
51.6350287103914
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Thames Water"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.17560451083803,
51.6449123308464
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "M L Parker"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24796196859382,
51.6859645202037
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC\r\nLimited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.31547514340399,
51.6337512701145
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Oxfordshire Highways"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.34630491382614,
51.5983703806159
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "H S Raynor"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29176826174873,
51.6262153240203
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC Limited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.26584440907651,
51.6352844903224
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC Limited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.25278048046993,
51.6302398847646
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC Limited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.24942904559911,
51.6377549146346
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Oxfordshire County Council"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.33775987095526,
51.677427069764
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Tuckwells"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.23307182072792,
51.6820128263116
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "J Curtis and Sons"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.28698949651945,
51.6571554353294
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "J Curtis and Sons"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29202780839381,
51.6505271122317
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Thames Water"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.26584145907056,
51.6491343354668
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC Limited"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.16174439429281,
51.657735268529
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "ARC"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.16241131301098,
51.6540996801051
]
}
},
{
"type": "Feature",
"properties": {
"type": "landfill",
"name": "Mr Pratt"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.16627422349311,
51.6503690664972
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "ABINGDON (OXON STW"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29023400482192,
51.6525458905304
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "BENSON, HENLEY ROA"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.10591156441916,
51.6096569683235
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "CHOLSEY, CHOLSEY,"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.14847942810127,
51.5805710109343
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "CULHAM, CULHAM, CL"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.22224255617706,
51.6557120182403
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "DIDCOT, FOXHALL RO"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.25117697460096,
51.6192149712736
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "DORCHESTER STW (OX"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.16329570438292,
51.6382232186528
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "DRAYTON, DRAYTON,"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.30348721780704,
51.6366205266501
]
}
},
{
"type": "Feature",
"properties": {
"type": "urbanwastewater",
"name": "ABINGDON STW"
},
"geometry": {
"type": "Point",
"coordinates": [
-1.29023400482192,
51.6525458905304
]
}
}
]
}
return data
| 26.778269 | 86 | 0.290601 |
4a26fd6ca0439a024a0b752d4cd7ff3946118f0c | 2,121 | py | Python | pymeasure/thread.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | 2 | 2020-11-24T11:08:21.000Z | 2021-01-23T01:46:05.000Z | pymeasure/thread.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | 1 | 2021-01-07T19:22:49.000Z | 2021-01-07T19:22:49.000Z | pymeasure/thread.py | NeoBeats/pymeasure | e48f9d679d6ee970e2e875d2fc9a5679378b07aa | [
"MIT"
] | 1 | 2021-05-19T05:39:28.000Z | 2021-05-19T05:39:28.000Z | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from threading import Thread, Event
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class StoppableThread(Thread):
""" Base class for Threads which require the ability
to be stopped by a thread-safe method call
"""
def __init__(self):
super().__init__()
self._should_stop = Event()
self._should_stop.clear()
def join(self, timeout=0):
""" Joins the current thread and forces it to stop after
the timeout if necessary
:param timeout: Timeout duration in seconds
"""
self._should_stop.wait(timeout)
if not self.should_stop():
self.stop()
return super().join(0)
def stop(self):
self._should_stop.set()
def should_stop(self):
return self._should_stop.is_set()
def __repr__(self):
return "<%s(should_stop=%s)>" % (
self.__class__.__name__, self.should_stop())
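# Minimal usage sketch (not part of pymeasure): a hypothetical worker subclass that
# polls should_stop() in its run loop, which is the pattern stop()/join() above assume.
#
# import time
#
# class ExamplePollingWorker(StoppableThread):
#     def run(self):
#         while not self.should_stop():
#             # ...do one unit of work per iteration...
#             time.sleep(0.1)
#
# worker = ExamplePollingWorker()
# worker.start()
# worker.join(timeout=1)  # requests a stop after the timeout, then joins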
| 33.666667 | 79 | 0.710042 |
4a26fd8110674d991d74099660c3019f71bda1ae | 849 | py | Python | PycharmProjects/PythonExercíciosHARDMODE/ex033HM.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercíciosHARDMODE/ex033HM.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | PycharmProjects/PythonExercíciosHARDMODE/ex033HM.py | RodrigoMASRamos/Projects.py | ed15981b320914c9667305dcd5fb5b7906fd9b00 | [
"MIT"
] | null | null | null | # ex033: Faça um programa que leia três números e mostre qual é o maior e qual é o menor.
print('\033[1;30mEste programa irá ler \033[1;35mtrês números \033[1;30me indicar qual é o \033[1;32mmaior '
'\033[1;30me o \033[1;31mmenor.\033[m')
n1_ = input('\033[0;30mDigite o primeiro número: \033[m').strip()
n2_ = input('\033[0;30mDigite o segundo número: \033[m').strip()
n3_ = input('\033[0;30mAgora, digite o terceiro número: \033[m').strip()
n1 = float(n1_)
n2 = float(n2_)
n3 = float(n3_)
maior = n1
menor = n1
if n2 > n1 and n2 > n3:
maior = n2
if n3 > n1 and n3 > n2:
maior = n3
if n2 < n1 and n2 < n3:
menor = n2
if n3 < n1 and n3 < n2:
menor = n3
print(f'\033[0;30mO \033[1;32mmaior \033[0;30mnúmero é \033[1;32m{maior}\033[0;30m, e o \033[1;31mmenor '
f'\033[0;30mnúmero é \033[1;31m{menor}\033[0;30m.\033[m')
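# Illustrative alternative (not part of the original exercise): the comparison chains
# above can be replaced by Python's built-ins, assuming the same n1, n2, n3 floats.
# maior = max(n1, n2, n3)
# menor = min(n1, n2, n3)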
| 32.653846 | 108 | 0.644287 |
4a26fdb73e1fad0f5ad9cd39da6e1342ccf6ffac | 1,118 | py | Python | leetcode/linked-list/reverse-linked-list.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 81 | 2020-05-22T14:22:04.000Z | 2021-12-18T10:11:23.000Z | leetcode/linked-list/reverse-linked-list.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 4 | 2020-08-06T21:08:00.000Z | 2021-03-31T16:07:50.000Z | leetcode/linked-list/reverse-linked-list.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 37 | 2020-05-22T14:25:21.000Z | 2021-12-30T03:13:13.000Z | """
## Questions
### 206. [Reverse Linked List](https://leetcode.com/problems/reverse-linked-list/)
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Solutions
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
"""
Time Complexity: O( n )
Space Complexity: O( 1 )
"""
def reverseList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
# Pointer to track the reversed list
prev = None
while head:
# Storing next value
temp = head.next
# Reverse
head.next = prev
prev = head
# Increment head
head = temp
return prev
# Runtime: 28 ms, faster than 97.79% of Python3 online submissions
# Memory Usage: 15.1 MB, less than 93.46% of Python3 online submissions
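# The follow-up above asks for both approaches; Solution is iterative. The sketch below
# (not part of the original submission) shows a recursive variant, assuming the same
# ListNode definition; it trades the O(1) extra space for an O(n) recursion stack.
class SolutionRecursive:
    """
    Time Complexity: O( n )
    Space Complexity: O( n ) recursion stack
    """
    def reverseList(self, head: ListNode) -> ListNode:
        # An empty list or a single node is already reversed
        if not head or not head.next:
            return head
        # Reverse everything after head, then hook head onto the tail
        new_head = self.reverseList(head.next)
        head.next.next = head
        head.next = None
        return new_head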
| 21.09434 | 90 | 0.595707 |
4a26fe30fe72882a815cb3c16026f443926e3ea3 | 3,283 | py | Python | large_dat.py | ck44liu/ICA-blind-source-separation | ad42c6cf00bb3686aa7ea4550f8222f4a9890415 | [
"MIT"
] | null | null | null | large_dat.py | ck44liu/ICA-blind-source-separation | ad42c6cf00bb3686aa7ea4550f8222f4a9890415 | [
"MIT"
] | null | null | null | large_dat.py | ck44liu/ICA-blind-source-separation | ad42c6cf00bb3686aa7ea4550f8222f4a9890415 | [
"MIT"
] | null | null | null | from scipy.io import loadmat # import loadmat to read .mat file
# from scipy.io import wavfile # import wavfile if we want to transform to sound files
import numpy as np
from numpy import array, matmul, identity, longdouble
from matplotlib import pyplot as plt
# use loadmat to read sounds.mat
U = loadmat('sounds.mat').get('sounds')
U = array(U, dtype=longdouble)
(n, t) = U.shape
print('U shape: {}'.format((n,t))) # should be n=5 and t=44000
# set m, the number of mixed signals we want to have
m = 5
# set a random seed and generate our mixing matrix A
np.random.seed(10)
A = np.random.uniform(-5, 5, (m, n))
print('A:\n', A)
# mix the signals
X = matmul(A, U)
# initialize matrix W with small random values
W = np.random.uniform(0, 1e-3, (n, m))
print('initial W:\n', W)
# update algorithm
num = 100
eta = 1e-5
for k in range(num):
Y = matmul(W, X)
# print("Y shape:", Y.shape)
Z = np.zeros((n, t), dtype=longdouble)
# print("Z.shape:", Z.shape)
# compute Z[i][j], note that if Y[i][j] is too large or too small,
# the exponent will be 0 or very large; to make the computation faster,
# we directly set Z[i][j] to 1 or 0 in such case.
for i in range(n):
for j in range(t):
if Y[i][j] > 20: # exp will be close to 0
Z[i][j] = 1
elif Y[i][j] < -20: # exp will be very large
Z[i][j] = 0
else:
Z[i][j] = 1 / (1 + np.exp(-Y[i][j]))
Mat = t * identity(n) + matmul(1 - 2 * Z, Y.T)
delta_W = eta * matmul(Mat, W)
W = W + delta_W
# since the algorithm takes longer time to run than the small dataset one,
# we print the W after each 20 iterations to check the progress
if k % 20 == 0:
print('W after iteration', k, ':\n', W)
print('W after', num, 'iterations:\n', W)
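# For reference only (not used above): the element-wise loop with its +/-20 cutoffs is a
# numerically safe logistic function. An equivalent vectorized sketch over the same Y is
# shown below; np.clip bounds the argument before exponentiation to avoid overflow.
def stable_sigmoid(M):
    clipped = np.clip(M, -20, 20)  # mirror the cutoffs used in the loop above
    return 1.0 / (1.0 + np.exp(-clipped))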
# compute our estimation of U
U_hat = matmul(W, X)
# create sig and sig_hat to store original and recovered signals
sig = np.zeros((n,t))
sig_hat = np.zeros((n,t))
for i in range(n):
sig[i,:] = U[i, :]
sig_hat[i,:] = U_hat[i, :]
# rescale the recovered sig_hat to [0,1]
# sig_hat[i,:] = (sig_hat[i,:]-min(sig_hat[i,:]))/(max(sig_hat[i,:])-min(sig_hat[i,:]))
# compute and print correlation matrix
r = np.corrcoef(sig, sig_hat)
# there are 10*10 entries in correlation matrix, we only print the 5*5 part
# that shows the correlation between original and recovered signals
print('Correlation matrix:\n', r[0:n, n:2*n])
# plot signals
x = np.arange(t)
plt.figure()
for i in range(n): # original signals
plt.subplot(n,1,i+1)
plt.plot(x, sig[i])
plt.figure()
for i in range(m): # mixed signals
plt.subplot(m,1,i+1)
plt.plot(x, X[i])
plt.figure()
for i in range(n): # recovered signals
plt.subplot(n,1,i+1)
plt.plot(x, sig_hat[i])
plt.show()
# # use wavfile to generate the .wav sound files, scaling might be needed to
# # ensure the audibility of the sound files
# wavfile.write('original_sounds_1.wav', 11000, np.int16(sig[0,:]*40000))
# wavfile.write('mixed_sounds_1.wav', 11000, np.int16(mix[0,:]*40000))
# wavfile.write('recovered_sounds_1.wav', 11000, np.int16(sig_hat[0,:]*40000)) | 32.186275 | 92 | 0.607981 |