# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training script for UNet-3D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard imports
from absl import app
from absl import flags
import tensorflow as tf
import input_reader
import params_dict
import tpu_executor
import unet_config
import unet_model
tpu_executor.define_tpu_flags()
flags.DEFINE_string(
'mode', 'train', 'Mode to run: train or eval or train_and_eval '
'(default: train)')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('training_file_pattern', None,
'Location of the train data.')
flags.DEFINE_string('eval_file_pattern', None, 'Location of the eval data.')
flags.DEFINE_string('config_file', '', 'A YAML file which specifies overrides.')
flags.DEFINE_string('params_override', '',
'A JSON-style string that specifies overrides.')
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def run_executer(params,
train_input_shapes=None, eval_input_shapes=None,
train_input_fn=None, eval_input_fn=None):
"""Runs Mask RCNN model on distribution strategy defined by the user."""
executer = tpu_executor.TPUEstimatorExecuter(
unet_model.unet_model_fn, params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes)
if FLAGS.mode == 'train':
assert train_input_fn is not None
results = executer.train(train_input_fn)
elif FLAGS.mode == 'eval':
assert eval_input_fn is not None
results = executer.evaluate(eval_input_fn)
elif FLAGS.mode == 'train_and_eval':
assert train_input_fn is not None
assert eval_input_fn is not None
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results
def main(argv):
del argv # Unused.
params = params_dict.ParamsDict(unet_config.UNET_CONFIG,
unet_config.UNET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=False)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=False)
params.override(
{
'training_file_pattern': FLAGS.training_file_pattern,
'eval_file_pattern': FLAGS.eval_file_pattern,
'model_dir': FLAGS.model_dir,
'min_eval_interval': FLAGS.min_eval_interval,
'eval_timeout': FLAGS.eval_timeout,
'tpu_config': tpu_executor.get_tpu_flags()
},
is_strict=False)
params.validate()
params.lock()
train_input_fn = None
eval_input_fn = None
train_input_shapes = None
eval_input_shapes = None
if FLAGS.mode in ('train', 'train_and_eval'):
train_input_fn = input_reader.LiverInputFn(
params.training_file_pattern, params, mode=tf.estimator.ModeKeys.TRAIN)
train_input_shapes = train_input_fn.get_input_shapes(params)
if FLAGS.mode in ('eval', 'train_and_eval'):
eval_input_fn = input_reader.LiverInputFn(
params.eval_file_pattern, params, mode=tf.estimator.ModeKeys.EVAL)
eval_input_shapes = eval_input_fn.get_input_shapes(params)
assert train_input_shapes is not None or eval_input_shapes is not None
run_executer(params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn)
if __name__ == '__main__':
app.run(main)
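# Example invocation (a sketch only: the script name, bucket paths, and YAML file
# below are hypothetical, and the TPU flags added by define_tpu_flags() are omitted):
#
#   python unet_main.py \
#     --mode=train_and_eval \
#     --model_dir=gs://my-bucket/unet3d \
#     --training_file_pattern=gs://my-bucket/train-*.tfrecord \
#     --eval_file_pattern=gs://my-bucket/eval-*.tfrecord \
#     --config_file=unet_config.yaml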
|
"""
syntax_abbrev.py - Abbreviations for pretty-printing syntax.asdl.
"""
from _devbuild.gen.id_kind_asdl import Id
from asdl import runtime
def _AbbreviateToken(tok, out):
# type: (token, List[runtime._PrettyBase]) -> None
if tok.id != Id.Lit_Chars:
n1 = runtime.PrettyLeaf(tok.id.name, runtime.Color_OtherConst)
out.append(n1)
n2 = runtime.PrettyLeaf(tok.val, runtime.Color_StringConst)
out.append(n2)
def _token(obj):
# type: (token) -> PrettyNode
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = '' # don't show
p_node.left = '<'
p_node.right = '>'
_AbbreviateToken(obj, p_node.unnamed_fields)
return p_node
def _speck(obj):
# type: (speck) -> PrettyNode
"""Always abbreviate a speck as the Id."""
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = '' # don't show
n1 = runtime.PrettyLeaf(obj.id.name, runtime.Color_OtherConst)
p_node.unnamed_fields.append(n1)
return p_node
def _double_quoted(obj):
# type: (double_quoted) -> PrettyNode
if obj.left.id != Id.Left_DoubleQuote:
return None # Fall back on obj._AbbreviatedTree()
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = 'DQ'
for part in obj.parts:
p_node.unnamed_fields.append(part.AbbreviatedTree())
return p_node
def _single_quoted(obj):
# type: (single_quoted) -> PrettyNode
# Only abbreviate 'foo', not $'foo\n'
if obj.left.id != Id.Left_SingleQuoteRaw:
return None # Fall back on obj._AbbreviatedTree()
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = 'SQ'
for token in obj.tokens:
p_node.unnamed_fields.append(token.AbbreviatedTree())
return p_node
def _simple_var_sub(obj):
# type: (simple_var_sub) -> PrettyNode
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = '$'
_AbbreviateToken(obj.token, p_node.unnamed_fields)
return p_node
def _braced_var_sub(obj):
# type: (braced_var_sub) -> PrettyNode
p_node = runtime.PrettyNode()
if obj.prefix_op or obj.bracket_op or obj.suffix_op:
return None # we have other fields to display; don't abbreviate
p_node.abbrev = True
p_node.node_type = '${'
_AbbreviateToken(obj.token, p_node.unnamed_fields)
return p_node
def _word_part__Literal(obj):
# type: (word_part__Literal) -> PrettyNode
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = '' # don't show
_AbbreviateToken(obj.token, p_node.unnamed_fields)
return p_node
def _word__Compound(obj):
# type: (word__Compound) -> PrettyNode
p_node = runtime.PrettyNode()
p_node.abbrev = True
p_node.node_type = '' # don't show
p_node.left = '{'
p_node.right = '}'
for part in obj.parts:
p_node.unnamed_fields.append(part.AbbreviatedTree())
return p_node
def _command__Simple(obj):
# type: (command__Simple) -> PrettyNode
p_node = runtime.PrettyNode()
if obj.redirects or obj.more_env or obj.block:
return None # we have other fields to display; don't abbreviate
p_node.abbrev = True
p_node.node_type = 'C'
for w in obj.words:
p_node.unnamed_fields.append(w.AbbreviatedTree())
return p_node
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/datastore/v1beta3/datastore.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.datastore._generated import entity_pb2 as google_dot_datastore_dot_v1beta3_dot_entity__pb2
from gcloud.datastore._generated import query_pb2 as google_dot_datastore_dot_v1beta3_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/datastore/v1beta3/datastore.proto',
package='google.datastore.v1beta3',
syntax='proto3',
serialized_pb=b'\n(google/datastore/v1beta3/datastore.proto\x12\x18google.datastore.v1beta3\x1a\x1cgoogle/api/annotations.proto\x1a%google/datastore/v1beta3/entity.proto\x1a$google/datastore/v1beta3/query.proto\"\x8d\x01\n\rLookupRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12;\n\x0cread_options\x18\x01 \x01(\x0b\x32%.google.datastore.v1beta3.ReadOptions\x12+\n\x04keys\x18\x03 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xb1\x01\n\x0eLookupResponse\x12\x35\n\x05\x66ound\x18\x01 \x03(\x0b\x32&.google.datastore.v1beta3.EntityResult\x12\x37\n\x07missing\x18\x02 \x03(\x0b\x32&.google.datastore.v1beta3.EntityResult\x12/\n\x08\x64\x65\x66\x65rred\x18\x03 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\x98\x02\n\x0fRunQueryRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12;\n\x0cpartition_id\x18\x02 \x01(\x0b\x32%.google.datastore.v1beta3.PartitionId\x12;\n\x0cread_options\x18\x01 \x01(\x0b\x32%.google.datastore.v1beta3.ReadOptions\x12\x30\n\x05query\x18\x03 \x01(\x0b\x32\x1f.google.datastore.v1beta3.QueryH\x00\x12\x37\n\tgql_query\x18\x07 \x01(\x0b\x32\".google.datastore.v1beta3.GqlQueryH\x00\x42\x0c\n\nquery_type\"}\n\x10RunQueryResponse\x12\x39\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32*.google.datastore.v1beta3.QueryResultBatch\x12.\n\x05query\x18\x02 \x01(\x0b\x32\x1f.google.datastore.v1beta3.Query\"-\n\x17\x42\x65ginTransactionRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\"/\n\x18\x42\x65ginTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\":\n\x0fRollbackRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"\x12\n\x10RollbackResponse\"\x8d\x02\n\rCommitRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12:\n\x04mode\x18\x05 \x01(\x0e\x32,.google.datastore.v1beta3.CommitRequest.Mode\x12\x15\n\x0btransaction\x18\x01 \x01(\x0cH\x00\x12\x35\n\tmutations\x18\x06 \x03(\x0b\x32\".google.datastore.v1beta3.Mutation\"F\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x11\n\rTRANSACTIONAL\x10\x01\x12\x15\n\x11NON_TRANSACTIONAL\x10\x02\x42\x16\n\x14transaction_selector\"k\n\x0e\x43ommitResponse\x12\x42\n\x10mutation_results\x18\x03 \x03(\x0b\x32(.google.datastore.v1beta3.MutationResult\x12\x15\n\rindex_updates\x18\x04 \x01(\x05\"U\n\x12\x41llocateIdsRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12+\n\x04keys\x18\x01 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"B\n\x13\x41llocateIdsResponse\x12+\n\x04keys\x18\x01 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xe4\x01\n\x08Mutation\x12\x32\n\x06insert\x18\x04 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12\x32\n\x06update\x18\x05 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12\x32\n\x06upsert\x18\x06 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12/\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x1d.google.datastore.v1beta3.KeyH\x00\x42\x0b\n\toperation\"<\n\x0eMutationResult\x12*\n\x03key\x18\x03 \x01(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xda\x01\n\x0bReadOptions\x12Q\n\x10read_consistency\x18\x01 \x01(\x0e\x32\x35.google.datastore.v1beta3.ReadOptions.ReadConsistencyH\x00\x12\x15\n\x0btransaction\x18\x02 \x01(\x0cH\x00\"M\n\x0fReadConsistency\x12 
\n\x1cREAD_CONSISTENCY_UNSPECIFIED\x10\x00\x12\n\n\x06STRONG\x10\x01\x12\x0c\n\x08\x45VENTUAL\x10\x02\x42\x12\n\x10\x63onsistency_type2\xb7\x07\n\tDatastore\x12\x8d\x01\n\x06Lookup\x12\'.google.datastore.v1beta3.LookupRequest\x1a(.google.datastore.v1beta3.LookupResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1beta3/projects/{project_id}:lookup:\x01*\x12\x95\x01\n\x08RunQuery\x12).google.datastore.v1beta3.RunQueryRequest\x1a*.google.datastore.v1beta3.RunQueryResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1beta3/projects/{project_id}:runQuery:\x01*\x12\xb5\x01\n\x10\x42\x65ginTransaction\x12\x31.google.datastore.v1beta3.BeginTransactionRequest\x1a\x32.google.datastore.v1beta3.BeginTransactionResponse\":\x82\xd3\xe4\x93\x02\x34\"//v1beta3/projects/{project_id}:beginTransaction:\x01*\x12\x8d\x01\n\x06\x43ommit\x12\'.google.datastore.v1beta3.CommitRequest\x1a(.google.datastore.v1beta3.CommitResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1beta3/projects/{project_id}:commit:\x01*\x12\x95\x01\n\x08Rollback\x12).google.datastore.v1beta3.RollbackRequest\x1a*.google.datastore.v1beta3.RollbackResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1beta3/projects/{project_id}:rollback:\x01*\x12\xa1\x01\n\x0b\x41llocateIds\x12,.google.datastore.v1beta3.AllocateIdsRequest\x1a-.google.datastore.v1beta3.AllocateIdsResponse\"5\x82\xd3\xe4\x93\x02/\"*/v1beta3/projects/{project_id}:allocateIds:\x01*B0\n\x1c\x63om.google.datastore.v1beta3B\x0e\x44\x61tastoreProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_datastore_dot_v1beta3_dot_entity__pb2.DESCRIPTOR,google_dot_datastore_dot_v1beta3_dot_query__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COMMITREQUEST_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='google.datastore.v1beta3.CommitRequest.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MODE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRANSACTIONAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NON_TRANSACTIONAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1263,
serialized_end=1333,
)
_sym_db.RegisterEnumDescriptor(_COMMITREQUEST_MODE)
_READOPTIONS_READCONSISTENCY = _descriptor.EnumDescriptor(
name='ReadConsistency',
full_name='google.datastore.v1beta3.ReadOptions.ReadConsistency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READ_CONSISTENCY_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRONG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVENTUAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2038,
serialized_end=2115,
)
_sym_db.RegisterEnumDescriptor(_READOPTIONS_READCONSISTENCY)
_LOOKUPREQUEST = _descriptor.Descriptor(
name='LookupRequest',
full_name='google.datastore.v1beta3.LookupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.LookupRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1beta3.LookupRequest.read_options', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1beta3.LookupRequest.keys', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=178,
serialized_end=319,
)
_LOOKUPRESPONSE = _descriptor.Descriptor(
name='LookupResponse',
full_name='google.datastore.v1beta3.LookupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='found', full_name='google.datastore.v1beta3.LookupResponse.found', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='missing', full_name='google.datastore.v1beta3.LookupResponse.missing', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred', full_name='google.datastore.v1beta3.LookupResponse.deferred', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=322,
serialized_end=499,
)
_RUNQUERYREQUEST = _descriptor.Descriptor(
name='RunQueryRequest',
full_name='google.datastore.v1beta3.RunQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.RunQueryRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partition_id', full_name='google.datastore.v1beta3.RunQueryRequest.partition_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1beta3.RunQueryRequest.read_options', index=2,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1beta3.RunQueryRequest.query', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gql_query', full_name='google.datastore.v1beta3.RunQueryRequest.gql_query', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='query_type', full_name='google.datastore.v1beta3.RunQueryRequest.query_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=502,
serialized_end=782,
)
_RUNQUERYRESPONSE = _descriptor.Descriptor(
name='RunQueryResponse',
full_name='google.datastore.v1beta3.RunQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch', full_name='google.datastore.v1beta3.RunQueryResponse.batch', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1beta3.RunQueryResponse.query', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=784,
serialized_end=909,
)
_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor(
name='BeginTransactionRequest',
full_name='google.datastore.v1beta3.BeginTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.BeginTransactionRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=911,
serialized_end=956,
)
_BEGINTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='BeginTransactionResponse',
full_name='google.datastore.v1beta3.BeginTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1beta3.BeginTransactionResponse.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=958,
serialized_end=1005,
)
_ROLLBACKREQUEST = _descriptor.Descriptor(
name='RollbackRequest',
full_name='google.datastore.v1beta3.RollbackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.RollbackRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1beta3.RollbackRequest.transaction', index=1,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1007,
serialized_end=1065,
)
_ROLLBACKRESPONSE = _descriptor.Descriptor(
name='RollbackResponse',
full_name='google.datastore.v1beta3.RollbackResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1067,
serialized_end=1085,
)
_COMMITREQUEST = _descriptor.Descriptor(
name='CommitRequest',
full_name='google.datastore.v1beta3.CommitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.CommitRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mode', full_name='google.datastore.v1beta3.CommitRequest.mode', index=1,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1beta3.CommitRequest.transaction', index=2,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.datastore.v1beta3.CommitRequest.mutations', index=3,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMMITREQUEST_MODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='transaction_selector', full_name='google.datastore.v1beta3.CommitRequest.transaction_selector',
index=0, containing_type=None, fields=[]),
],
serialized_start=1088,
serialized_end=1357,
)
_COMMITRESPONSE = _descriptor.Descriptor(
name='CommitResponse',
full_name='google.datastore.v1beta3.CommitResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mutation_results', full_name='google.datastore.v1beta3.CommitResponse.mutation_results', index=0,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='index_updates', full_name='google.datastore.v1beta3.CommitResponse.index_updates', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1359,
serialized_end=1466,
)
_ALLOCATEIDSREQUEST = _descriptor.Descriptor(
name='AllocateIdsRequest',
full_name='google.datastore.v1beta3.AllocateIdsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1beta3.AllocateIdsRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1beta3.AllocateIdsRequest.keys', index=1,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1468,
serialized_end=1553,
)
_ALLOCATEIDSRESPONSE = _descriptor.Descriptor(
name='AllocateIdsResponse',
full_name='google.datastore.v1beta3.AllocateIdsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1beta3.AllocateIdsResponse.keys', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1555,
serialized_end=1621,
)
_MUTATION = _descriptor.Descriptor(
name='Mutation',
full_name='google.datastore.v1beta3.Mutation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='insert', full_name='google.datastore.v1beta3.Mutation.insert', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update', full_name='google.datastore.v1beta3.Mutation.update', index=1,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upsert', full_name='google.datastore.v1beta3.Mutation.upsert', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delete', full_name='google.datastore.v1beta3.Mutation.delete', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.datastore.v1beta3.Mutation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=1624,
serialized_end=1852,
)
_MUTATIONRESULT = _descriptor.Descriptor(
name='MutationResult',
full_name='google.datastore.v1beta3.MutationResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.datastore.v1beta3.MutationResult.key', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1854,
serialized_end=1914,
)
_READOPTIONS = _descriptor.Descriptor(
name='ReadOptions',
full_name='google.datastore.v1beta3.ReadOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='read_consistency', full_name='google.datastore.v1beta3.ReadOptions.read_consistency', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1beta3.ReadOptions.transaction', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_READOPTIONS_READCONSISTENCY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='consistency_type', full_name='google.datastore.v1beta3.ReadOptions.consistency_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=1917,
serialized_end=2135,
)
_LOOKUPREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_LOOKUPREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_LOOKUPRESPONSE.fields_by_name['found'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['missing'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['deferred'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_RUNQUERYREQUEST.fields_by_name['partition_id'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._PARTITIONID
_RUNQUERYREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_RUNQUERYREQUEST.fields_by_name['query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERY
_RUNQUERYREQUEST.fields_by_name['gql_query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._GQLQUERY
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['query'])
_RUNQUERYREQUEST.fields_by_name['query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['gql_query'])
_RUNQUERYREQUEST.fields_by_name['gql_query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYRESPONSE.fields_by_name['batch'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERYRESULTBATCH
_RUNQUERYRESPONSE.fields_by_name['query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERY
_COMMITREQUEST.fields_by_name['mode'].enum_type = _COMMITREQUEST_MODE
_COMMITREQUEST.fields_by_name['mutations'].message_type = _MUTATION
_COMMITREQUEST_MODE.containing_type = _COMMITREQUEST
_COMMITREQUEST.oneofs_by_name['transaction_selector'].fields.append(
_COMMITREQUEST.fields_by_name['transaction'])
_COMMITREQUEST.fields_by_name['transaction'].containing_oneof = _COMMITREQUEST.oneofs_by_name['transaction_selector']
_COMMITRESPONSE.fields_by_name['mutation_results'].message_type = _MUTATIONRESULT
_ALLOCATEIDSREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_ALLOCATEIDSRESPONSE.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_MUTATION.fields_by_name['insert'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['update'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['upsert'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['delete'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['insert'])
_MUTATION.fields_by_name['insert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['update'])
_MUTATION.fields_by_name['update'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['upsert'])
_MUTATION.fields_by_name['upsert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['delete'])
_MUTATION.fields_by_name['delete'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATIONRESULT.fields_by_name['key'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY
_READOPTIONS.fields_by_name['read_consistency'].enum_type = _READOPTIONS_READCONSISTENCY
_READOPTIONS_READCONSISTENCY.containing_type = _READOPTIONS
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['read_consistency'])
_READOPTIONS.fields_by_name['read_consistency'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['transaction'])
_READOPTIONS.fields_by_name['transaction'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
DESCRIPTOR.message_types_by_name['LookupRequest'] = _LOOKUPREQUEST
DESCRIPTOR.message_types_by_name['LookupResponse'] = _LOOKUPRESPONSE
DESCRIPTOR.message_types_by_name['RunQueryRequest'] = _RUNQUERYREQUEST
DESCRIPTOR.message_types_by_name['RunQueryResponse'] = _RUNQUERYRESPONSE
DESCRIPTOR.message_types_by_name['BeginTransactionRequest'] = _BEGINTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['BeginTransactionResponse'] = _BEGINTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['RollbackRequest'] = _ROLLBACKREQUEST
DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE
DESCRIPTOR.message_types_by_name['CommitRequest'] = _COMMITREQUEST
DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name['AllocateIdsRequest'] = _ALLOCATEIDSREQUEST
DESCRIPTOR.message_types_by_name['AllocateIdsResponse'] = _ALLOCATEIDSRESPONSE
DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION
DESCRIPTOR.message_types_by_name['MutationResult'] = _MUTATIONRESULT
DESCRIPTOR.message_types_by_name['ReadOptions'] = _READOPTIONS
LookupRequest = _reflection.GeneratedProtocolMessageType('LookupRequest', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.LookupRequest)
))
_sym_db.RegisterMessage(LookupRequest)
LookupResponse = _reflection.GeneratedProtocolMessageType('LookupResponse', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.LookupResponse)
))
_sym_db.RegisterMessage(LookupResponse)
RunQueryRequest = _reflection.GeneratedProtocolMessageType('RunQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RunQueryRequest)
))
_sym_db.RegisterMessage(RunQueryRequest)
RunQueryResponse = _reflection.GeneratedProtocolMessageType('RunQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RunQueryResponse)
))
_sym_db.RegisterMessage(RunQueryResponse)
BeginTransactionRequest = _reflection.GeneratedProtocolMessageType('BeginTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.BeginTransactionRequest)
))
_sym_db.RegisterMessage(BeginTransactionRequest)
BeginTransactionResponse = _reflection.GeneratedProtocolMessageType('BeginTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.BeginTransactionResponse)
))
_sym_db.RegisterMessage(BeginTransactionResponse)
RollbackRequest = _reflection.GeneratedProtocolMessageType('RollbackRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RollbackRequest)
))
_sym_db.RegisterMessage(RollbackRequest)
RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RollbackResponse)
))
_sym_db.RegisterMessage(RollbackResponse)
CommitRequest = _reflection.GeneratedProtocolMessageType('CommitRequest', (_message.Message,), dict(
DESCRIPTOR = _COMMITREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.CommitRequest)
))
_sym_db.RegisterMessage(CommitRequest)
CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMITRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.CommitResponse)
))
_sym_db.RegisterMessage(CommitResponse)
AllocateIdsRequest = _reflection.GeneratedProtocolMessageType('AllocateIdsRequest', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSREQUEST,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.AllocateIdsRequest)
))
_sym_db.RegisterMessage(AllocateIdsRequest)
AllocateIdsResponse = _reflection.GeneratedProtocolMessageType('AllocateIdsResponse', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSRESPONSE,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.AllocateIdsResponse)
))
_sym_db.RegisterMessage(AllocateIdsResponse)
Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict(
DESCRIPTOR = _MUTATION,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Mutation)
))
_sym_db.RegisterMessage(Mutation)
MutationResult = _reflection.GeneratedProtocolMessageType('MutationResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATIONRESULT,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.MutationResult)
))
_sym_db.RegisterMessage(MutationResult)
ReadOptions = _reflection.GeneratedProtocolMessageType('ReadOptions', (_message.Message,), dict(
DESCRIPTOR = _READOPTIONS,
__module__ = 'google.datastore.v1beta3.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1beta3.ReadOptions)
))
_sym_db.RegisterMessage(ReadOptions)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\034com.google.datastore.v1beta3B\016DatastoreProtoP\001')
# @@protoc_insertion_point(module_scope)
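# A minimal usage sketch for the generated classes (kept as comments, since this file
# is generated and must not be edited by hand; the project id below is hypothetical):
#
#   req = LookupRequest(project_id='my-project')
#   req.keys.add()                        # append an empty Key to the repeated field
#   data = req.SerializeToString()        # wire-format bytes
#   same = LookupRequest.FromString(data)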
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2018 Jun.
@author: HuangLiPang, QuenLo
python version: 2.7
logging config doc:
https://docs.python.org/2/library/logging.config.html
"""
import logging
import logging.config
import time
from RotatingFileNameHandler import RotatingFileNameHandler
class UTCFormatter(logging.Formatter):
converter = time.gmtime
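# Note: logging.Formatter uses its "converter" attribute to turn record timestamps
# into time tuples, so converter = time.gmtime makes %(asctime)s render in UTC; the
# "utc" formatter below instantiates this class through the special "()" key.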
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
# local time
"standard": {
"format": "%(asctime)s - %(message)s",
"datefmt": "%Y-%m-%d %I:%M:%S %p"
},
"complete": {
"format": "%(asctime)s - PID: %(process)d"\
" - %(levelname)s - %(filename)s - %(lineno)d - %(message)s",
"datefmt": "%Y-%m-%d %I:%M:%S %p"
},
"utc": {
# "()" is a special key, which indicates a custom instantiation.
"()": UTCFormatter,
"format": "%(asctime)s %(message)s",
"datefmt": "%Y-%m-%d %I:%M:%S %p"
}
},
"handlers": {
# StreamHandler will show log in console
"default": {
"level": "INFO",
"formatter": "complete",
"class": "logging.StreamHandler"
}
},
# root logger
"root": {
"handlers": ["default"],
# default level is "WARNING"
"level": "INFO",
"propagate": True
}
}
|
'''
Created on Oct 22, 2010
@author: Stephen O'Hara
'''
# PyVision License
#
# Copyright (c) 2006-2008 Stephen O'Hara
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import scipy as sp
import pyvision as pv
class ImageBuffer:
'''
Stores a limited number of images from a video (or any other source)
Makes it easy to do N-frame-differencing, for example, by easily being
able to get the current (middle) frame, plus the first and last frames of the
buffer. With an ImageBuffer of size N, as images are added, eventually the
buffer fills, and older items are dropped off the end. This is convenient
for streaming input sources, as the user can simply keep adding images
to this buffer, and internally, the most recent N will be kept available.
'''
def __init__(self, N=5):
'''
@param N: how many image frames to buffer
'''
self._data = [None for _ in xrange(N)]
self._count = 0
self._max = N
def __getitem__(self, key):
return self._data[key]
def __len__(self):
'''
        This is a fixed-size ring buffer, so the length is always the number
        of images that can be stored in the buffer (as initialized with N).
'''
return self._max
def isFull(self):
if self._count == self._max:
return True
else:
return False
def clear(self):
self._data = [None for _ in xrange(self._max)]
self._count = 0
def getCount(self):
'''
        Note that getCount() differs from __len__() in that this method returns the number of
        images actually stored in the ImageBuffer, while __len__() returns the size of the buffer,
defined as the number of images the buffer is allowed to store.
'''
return self._count
def getBuffer(self):
return self._data
def getFirst(self):
return self._data[0]
def getLast(self):
return self._data[-1]
def getMiddle(self):
mid = int(self._count/2)
return self._data[mid]
def add(self, image):
'''
        Add an image to the buffer; the oldest image is kicked out if the buffer is full.
@param image: image to add to buffer
'''
        self._data.pop(0)  # remove the oldest entry; while the buffer is still filling this is just a None placeholder
self._data.append(image)
self._count += 1
if(self._count > self._max):
self._count = self._max
def fillBuffer(self, vid):
'''
If buffer is empty, you can use this function to spool off the first
N frames of the video to initialize/fill the buffer.
@param vid: an iterator of images, typically a pv.Video object or similar.
@note: Will cause an assertion exception if buffer is already full.
'''
assert not self.isFull()
while not self.isFull():
im = vid.next()
self.add(im)
return
def asStackBW(self, size=None):
'''
Outputs an image buffer as a 3D numpy array ("stack") of grayscale images.
@param size: A tuple (w,h) indicating the output size of each frame.
If None, then the size of the first image in the buffer will be used.
@return: a 3D array (stack) of the gray scale version of the images
in the buffer. The dimensions of the stack are (N,w,h), where N is
the number of images (buffer size), w and h are the width and height
of each image.
'''
        if size is None:
img0 = self[0]
(w,h) = img0.size
else:
(w,h) = size
f = self.getCount()
stack = sp.zeros((f,w,h))
for i,img in enumerate(self._data):
#if img is not (w,h) in size, then resize first
sz = img.size
if (w,h) != sz:
img2 = img.resize((w,h))
mat = img2.asMatrix2D()
else:
mat = img.asMatrix2D()
stack[i,:,:] = mat
return stack
def asMontage(self, layout, tileSize=None, **kwargs):
(w,h) = self[0].size
        if tileSize is None:
tw = w/5
th = h/5
if tw < 32: tw=32
if th < 24: th=24
tileSize = (tw,th)
im = pv.ImageMontage(self._data, layout=layout, tileSize=tileSize, **kwargs)
return im
def show(self, N=10, window="Image Buffer", pos=None, delay=0):
'''
@param N: The number of images in the buffer to display at once
@param window: The window name
@param pos: The window position
@param delay: The window display duration
'''
        if self[0] is None: return
if N <= self._count:
im = self.asMontage(layout=(1,N))
else:
im = self.asMontage(layout=(1,self._count))
im.show(window, pos, delay)
#img = im.asImage()
#img.show(window, pos, delay)
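# A minimal usage sketch (the video path is hypothetical; assumes a pv.Video source
# that yields pv.Image frames, as mentioned in fillBuffer()):
#
#   buf = ImageBuffer(N=5)
#   buf.fillBuffer(pv.Video("example.avi"))
#   first, mid, last = buf.getFirst(), buf.getMiddle(), buf.getLast()
#   stack = buf.asStackBW(size=(64, 48))   # (N, w, h) grayscale stack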
|
"""represents an ipwhois info"""
# -*- coding:utf-8 -*-
import threading
from commonbaby.countrycodes import ALL_COUNTRIES
from datacontract.iscoutdataset.iscouttask import IscoutTask
class IPWhoisEntityData(object):
"""represents an entity in an ipwhois info"""
_roles_def: list = [
"registrant", "technical", "administrative", "abuse", "billing",
"registrar", "reseller", "sponsor", "proxy", "notifications", "noc"
]
def __init__(self, handle: str):
if not isinstance(handle, str) or handle == "":
raise Exception("Invalid handle for IPWhoisEntity")
self._handle: str = handle
self._roles: list = []
self._roles_locker = threading.RLock()
self.last_modified: str = None
self.name: str = None
self.address: str = None
self.email: str = None
self.phone: str = None
def set_role(self, role: str):
if not isinstance(role, str) or not role in self._roles_def:
return
if role in self._roles:
return
with self._roles_locker:
if role in self._roles:
return
self._roles.append(role)
def get_outputdict(self) -> dict:
"""return entity dict"""
res: dict = {}
res['handle'] = self._handle
res['roles'] = self._roles
if not self.last_modified is None:
res['last_modified'] = self.last_modified
if not self.address is None and self.address != "":
res['address'] = self.address
if not self.email is None and self.email != "":
res['email'] = self.email
if not self.phone is None and self.phone != "":
res['phone'] = self.phone
return res
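# A minimal usage sketch (the handle and contact values are hypothetical; "registrant"
# is one of the roles listed in _roles_def above):
#
#   ent = IPWhoisEntityData("EXAMPLE-HANDLE")
#   ent.set_role("registrant")
#   ent.email = "noc@example.net"
#   print(ent.get_outputdict())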
class IPWhoisData(object):
"""represents a whois info\n
ip_ver: v4/v6"""
def __init__(
self,
reason: str,
md5: str,
raw: str,
handle: str,
allocate_type: str,
netname: str,
country_code: str,
ip_ver: str = 'v4',
):
if not isinstance(reason, str) or reason == "":
raise Exception('Invalid param "reason" for IPWhois')
if not isinstance(md5, str) or md5 == "":
raise Exception('Invalid param "md5" for IPWhois')
if not isinstance(raw, str) or raw == "":
raise Exception('Invalid param "raw" for IPWhois')
if not isinstance(handle, str) or handle == "":
raise Exception('Invalid param "handle" for IPWhois')
if not isinstance(ip_ver, str) or ip_ver == "":
raise Exception('Invalid param "ip_ver" for IPWhois')
if not isinstance(allocate_type, str) or allocate_type == "":
# raise Exception('Invalid param "allocate_type" for IPWhois')
allocate_type = "ALLOCATED PORTABLE"
if not isinstance(netname, str) or netname == "":
raise Exception('Invalid param "netname" for IPWhois')
if not isinstance(country_code,
str) or not ALL_COUNTRIES.__contains__(country_code):
raise Exception('Invalid param "country_code" for IPWhois')
self._reason: str = reason
self._md5: str = md5
self._raw: str = raw
self._handle: str = handle
self._ip_ver: str = ip_ver
self.applicable_from: str = None
self.applicable_until: str = None
self._allocate_type: str = allocate_type
self._netname: str = netname
self._country_code: str = country_code
self.last_modified: str = None
self.remarks: str = None
self._cidrs: dict = {}
self._cidrs_locker = threading.RLock()
self._entities: dict = {}
self._entities_locker = threading.RLock()
def set_cidrs(self, cidr: str):
"""set cidr"""
if not isinstance(cidr, str) or cidr == "":
return
if self._cidrs.__contains__(cidr):
return
with self._cidrs_locker:
if self._cidrs.__contains__(cidr):
return
self._cidrs[cidr] = cidr
def set_entity(self, entity: IPWhoisEntityData):
"""set entity"""
if not isinstance(entity, IPWhoisEntityData):
return
        # Within the entities of a single whois record, each entity's handle is unique.
        # Check later in the UI whether this assumption ever causes problems.
if self._entities.__contains__(entity._handle):
return
with self._entities_locker:
if self._entities.__contains__(entity._handle):
return
self._entities[entity._handle] = entity
def get_outputdict(self) -> dict:
"""get ipwhois dict"""
res: dict = {}
res['reason'] = self._reason
res['md5'] = self._md5
res['raw'] = self._raw
res['handle'] = self._handle
res['ip_version'] = self._ip_ver
if not self.applicable_from is None:
res['applicable_from'] = self.applicable_from
if not self.applicable_until is None:
res['applicable_until'] = self.applicable_until
res['allocate_type'] = self._allocate_type
res['netname'] = self._netname
if isinstance(self.remarks, str) and self.remarks != '':
res['remarks'] = self.remarks
res['country_code'] = self._country_code
if not self.last_modified is None:
res['last_modified'] = self.last_modified
if len(self._cidrs) > 0:
res['cidrs'] = []
for c in self._cidrs.values():
res['cidrs'].append(c)
if len(self._entities) > 0:
res['entities'] = []
for e in self._entities.values():
res['entities'].append(e.get_outputdict())
        return res
|
import enum
class Level(enum.Enum):
CRITICAL = 70
DEBUG = 40
ERROR = 60
FATAL = 80
INFO = 30
NOTICE = 20
TRACE = 10
WARNING = 50
UNSET = 0
class LevelColors(str, enum.Enum):
CRITICAL = "\x1b[38;5;196m"
DEBUG = "\x1b[38;5;32m"
ERROR = "\x1b[38;5;202m"
FATAL = "\x1b[38;5;198m"
INFO = "\x1b[38;5;12m"
NOTICE = "\x1b[38;5;15m"
TRACE = "\x1b[38;5;44m"
WARNING = "\x1b[38;5;220m"
UNSET = "\x1b[0m"
|
from tqdm import tqdm
import tensorflow as tf
# from core.dataset import Dataset
# yolo_data = Dataset('type')
# pbar_yolo = tqdm(yolo_data)
# for batch_data in pbar_yolo:
# continue
# #print(batch_data[0].shape)
## "./data/images/train.txt"
from core.lyhdata import Dataset
yolo_train_data = Dataset('train',num_parallel=1)
yolo_val_data = Dataset('val',num_parallel=1)
train_data = yolo_train_data.get_dataset()
val_data = yolo_val_data.get_dataset()
iterator = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)
train_init_op = iterator.make_initializer(train_data)
val_init_op = iterator.make_initializer(val_data)
#pbar = tqdm(range(yolo_train_data.batches_per_epoch))
#
# Build the next-element op once, outside the training loop; calling
# iterator.get_next() inside the loop would add a new op to the graph on every step.
next_batch = iterator.get_next()
with tf.Session() as sess:
    for epoch in range(1):
        #sess.run(yolo_data.train_data_init_op)
        sess.run(train_init_op)
        #sess.run(val_init_op)
        for _ in tqdm(range(yolo_train_data.batches_per_epoch)):
            img, s_label, m_label, l_label, s_box, m_box, l_box = sess.run(next_batch)
# print('img.shape',img.shape)
# print('s_label.shape',s_label.shape)
# print('m_label.shape',m_label.shape)
# print('l_label.shape',l_label.shape)
# print('l_box.shape',l_box.shape)
|
import plotly.graph_objects as go
from numpy import arange, sqrt, abs, max, linspace, isnan, histogram, zeros, corrcoef, ceil
from .utils import to_div
from ..utils import get_color_for_val
from ..parameters import FINGERS
def plot_ols_params(df, param, region_type, yaxis_name=''):
    """Scatter plot of the mean (with s.e.m. error bars) of an OLS parameter per brain region."""
SUMMARY = {}
for region in sorted(df['channel'][region_type].unique()):
if region == 'unknown':
continue
x = df[param[0]][param[1]][df['channel'][region_type] == region]
SUMMARY[region] = [x.mean(), x.std() / sqrt(len(x))]
fig = go.Figure([
go.Scatter(
mode='markers',
y=[x[0] for x in SUMMARY.values()],
marker=dict(
color='black'),
error_y=dict(
type='data',
array=[x[1] for x in SUMMARY.values()],
)
)],
layout=go.Layout(
title=dict(
text=' / '.join(param),
),
xaxis=dict(
title=dict(
text='brain region',
),
tickmode='array',
tickvals=arange(len(SUMMARY)),
ticktext=list(SUMMARY.keys()),
),
yaxis=dict(
title=dict(
text=yaxis_name)
),
)
)
return fig
def plot_ols_rsquared(df, region_type):
    """Stacked bar chart of channel counts per r-squared bin for each brain region."""
max_val = ceil(df['estimate']['rsquared'].max() * 10) / 10
rsquared_levels = arange(0, max_val, .1)
BARS = {}
for region in sorted(df['channel'][region_type].unique()):
if region == 'unknown':
continue
i_region = df['channel'][region_type] == region
if i_region.sum() < 50:
continue
vals = []
for i in rsquared_levels:
vals.append(
len(df[i_region & (i <= df['estimate']['rsquared']) & (df['estimate']['rsquared'] <= (i + 0.1))]))
BARS[region] = vals
    n_levels = len(rsquared_levels)  # one bar segment per r-squared bin
bar_plots = []
for i in range(n_levels):
bar_plots.append(
go.Bar(
x=list(BARS.keys()),
y=[x[i] for x in BARS.values()],
name=f'{rsquared_levels[i]:0.1f} - {rsquared_levels[i]+0.1:0.1f}',
marker=dict(
line=dict(
width=0,
),
color=get_color_for_val(i, 'Hot', 0, n_levels),
),
),
)
fig = go.Figure(
bar_plots,
layout=go.Layout(
barmode='stack',
title=dict(
text='R-Squares'
),
xaxis=dict(
title=dict(
text='brain region',)
),
yaxis=dict(
title=dict(
text='# channels',)
),
))
return fig
def plot_ols_prf(df, region_type, param):
    """Per-region histograms of a pRF parameter, shown separately for extension and flexion fits."""
i = (df['estimate']['rsquared'] >= 0.1)
df1 = df[i]
x = df1['flexext']['diff']
absmax = max(abs(x))
bins = linspace(-absmax, absmax, 30)
divs = []
for region in df['channel'][region_type].unique():
df_roi = df1.loc[df['channel'][region_type] == region]
i_ext = (df_roi['estimate']['rsquared'] >= 0.1) & (df_roi['prf_ext']['rsquared'] >= 0.9)
i_flex = (df_roi['estimate']['rsquared'] >= 0.1) & (df_roi['prf_flex']['rsquared'] >= 0.9)
if param == 'finger':
bins = linspace(-1.5, 5.5, 15)
elif param == 'spread':
bins = linspace(0, 5, 20)
fig = go.Figure(data=[
make_bars(df_roi[i_ext]['prf_ext'][param], bins, 'extension'),
make_bars(df_roi[i_flex]['prf_flex'][param], bins, 'flexion'),
],
layout=go.Layout(
title=dict(
text=region),
))
divs.append(to_div(fig))
return divs
def plot_ols_flexext(df, region_type):
    """Per-region histograms of the flexion-extension difference."""
i = (df['estimate']['rsquared'] >= 0.1)
df1 = df[i]
x = df1['flexext']['diff']
absmax = max(abs(x))
bins = linspace(-absmax, absmax, 30)
divs = []
for region in df['channel'][region_type].unique():
df_roi = df1.loc[df['channel'][region_type] == region]
fig = go.Figure(
data=[
make_bars(df_roi['flexext']['diff'], bins),
],
layout=go.Layout(
title=dict(
text=region),
bargap=0,
))
divs.append(to_div(fig))
return divs
def make_bars(x, bins, name=''):
    """Return a Bar trace of the histogram of x (NaNs dropped), normalized to fractions."""
x = x[~isnan(x)]
[hist, edges] = histogram(x, bins=bins)
hist = hist / sum(hist)
trace = go.Bar(
x=edges[:-1] + (edges[1] - edges[0]) / 2,
y=hist,
name=name,
)
return trace
def plot_fingerfriends(df_ols, region_type):
i = df_ols['estimate']['rsquared'] > 0.1
df_sign = df_ols[i]
divs = []
for region in df_sign['channel'][region_type].unique():
i_region = df_sign['channel'][region_type] == region
if i_region.sum() < 2:
continue
for MOVEMENT in ('flexion', 'extension', 'close', 'open'):
if MOVEMENT not in df_sign.columns:
continue
cc = zeros((5, 5))
for i0, f0 in enumerate(FINGERS):
for i1, f1 in enumerate(FINGERS):
cc[i0, i1] = corrcoef(df_sign[MOVEMENT][f0][i_region], df_sign[MOVEMENT][f1][i_region])[0, 1]
fig = go.Figure(
data=[
go.Heatmap(
z=cc,
zmin=0,
zmax=1,
colorscale='Hot',
),
],
layout=go.Layout(
title=dict(
text=region + ' ' + MOVEMENT),
yaxis=dict(
autorange='reversed')))
divs.append(to_div(fig))
return divs
|
import pygame
from pygame import Rect
import config
"""convenience component offsets for tuple coordinates"""
X = 0
Y = 1
class Vehicle:
"""
Represents a truck, car, log, turtle or other moving object
"""
def __init__(self, spec, position, speed) -> None:
super().__init__()
self.spec = spec
self.speed = speed
self.pos = position
def __repr__(self):
return self.__str__()
def __str__(self):
rideability = "rideable" if self.spec.rideable else "unrideable"
sx = self.pos[X]
sy = self.pos[Y]
return "%s @ %1.2f->%1.2f,%1.2f" % (rideability, sx, sx + self.spec.width, sy)
def draw(self, surface, pos_to_pixels, block_size, clip):
"""
Draw ourself at given block size on surface using pos_to_pixels to convert position confined to clip rect
"""
px = pos_to_pixels(self.pos)
inset = round(-block_size * (1 - self.spec.scale))
width = self.spec.width
pixel_width = width * block_size
for i in range(self.spec.chain_length):
rect = Rect(px[X] + i * pixel_width, px[Y], width * block_size, block_size).inflate(inset, inset).clip(clip)
pygame.draw.rect(surface, self.spec.colour, rect)
def update(self, scene_width_blocks) -> bool:
"""
Moves the vehicle, returning true iff it has disappeared off screen.
"""
x = self.pos[0]
x = x + self.speed * config.game_speed / 100
self.pos = (x, self.pos[Y])
w = self.total_width()
going_left = self.speed < 0
disappeared_off_left = (x + w < 0 and going_left)
disappeared_off_right = (x > scene_width_blocks and not going_left)
return not (disappeared_off_left or disappeared_off_right)
def collides_with(self, other) -> bool:
"""Returns true only if other overlaps with the vehicle's current position."""
# TODO make collision detection work better - used only for frog collision?
return other[Y] == self.pos[Y] and self.pos[X] <= other[X] <= (self.pos[X] + self.total_width())
def total_width(self) -> int:
return self.spec.width * self.spec.chain_length
def is_rideable(self) -> bool:
return self.spec.rideable
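# --- Hedged usage sketch (added for illustration; not part of the original file).
# The real VehicleSpec type is not shown here, so a namedtuple stands in for it,
# assuming only the attributes Vehicle actually reads: width, scale, chain_length,
# colour and rideable. Running this also assumes config.game_speed is defined.
if __name__ == "__main__":
    from collections import namedtuple
    VehicleSpec = namedtuple("VehicleSpec", "width scale chain_length colour rideable")
    truck = Vehicle(
        spec=VehicleSpec(width=2, scale=0.9, chain_length=1, colour=(200, 40, 40), rideable=False),
        position=(0.0, 3.0),
        speed=1.5,
    )
    # update() returns True while the truck is still inside (or approaching)
    # a 14-block-wide scene, matching the docstring above.
    for _ in range(5):
        print(truck, "still in play:", truck.update(scene_width_blocks=14))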
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
import jsbeautifier
import js2py
import requests
from copy import deepcopy
import time
from pyquery import PyQuery as pq
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
}
def gethtml(url):
response = requests.get(url, headers=headers)
return response
def get_cookie(url,selector,key1="FSSBBIl1UgzbN7N80S",key2="FSSBBIl1UgzbN7N80T"):
profile = webdriver.FirefoxOptions()
    profile.add_argument('-headless')  # run Firefox in headless mode
driver=webdriver.Firefox(options=profile)
driver.get(url)
WebDriverWait(driver,10,0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR,selector)))
a=driver.get_cookie(key1)
b=driver.get_cookie(key2)
cookies={key1:a["value"],key2:b["value"]}
html=driver.page_source
driver.quit()
return cookies
def get_html(url,selector):
profile = webdriver.FirefoxOptions()
    profile.add_argument('-headless')  # run Firefox in headless mode
driver=webdriver.Firefox(options=profile)
driver.get(url)
WebDriverWait(driver,10,0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR,selector)))
time.sleep(1)
html=driver.page_source
driver.quit()
return html
def get_url(url):
try:
response = gethtml(url)
res = response.content.decode()
js = re.compile('<script type="text/javascript">(.+?_0x33f22a\(\));', re.S).findall(res)[0]
js = re.sub(
"if\(_0x532424\['LaaBO'\]\(wzwsmethod,_0x532424\[_0x56ae\('0x3b','Q@8l'\)\]\)\)\{_0x2ff265\(_0x35ace3,wzwsparams\);\}else\{window\[_0x56ae\('0x3c','\)9A&'\)\]=_0x35ace3;\}",
"return _0x35ace3", js)
js = jsbeautifier.beautify(js)
wzwschallenge = js2py.eval_js(js)
cookies = response.cookies.get_dict()
headers_new=deepcopy(headers)
headers_new["Cookie"]="wzws_cid="+cookies["wzws_cid"]
return wzwschallenge,headers_new
except:
print("再来一次")
time.sleep(1)
wzwschallenge,headers_new=get_url(url)
return wzwschallenge,headers_new
def get_form_data(html,i):
data={}
doc=pq(html)
data["__VIEWSTATE"]=doc("#__VIEWSTATE").attr("value")
data["__VIEWSTATEGENERATOR"]=doc("#__VIEWSTATEGENERATOR").attr("value")
data["__EVENTTARGET"]="AspNetPager1"
data["__EVENTARGUMENT"]=i
data["__EVENTVALIDATION"]=doc("#__EVENTVALIDATION").attr("value")
data["_title"]=""
data["_wenhao"]=""
data["start"]=""
data["end"]=""
return data |
# -*- coding: utf-8 -*-
"""
@Time: 2021/8/2 16:52
@Author: zzhang [email protected]
@File: omnis_monitor.py
@desc:
"""
from collect.service_imp.flow.omnis_flow import ServiceOmnisFlowService
from collect.utils.collect_utils import get_safe_data, get_key
class ServiceFlowService(ServiceOmnisFlowService):
sf_const = {
"flow_name": "flow",
}
def get_flow_name(self):
return self.sf_const["flow_name"]
def get_flow(self):
return get_safe_data(self.get_flow_name(), self.template)
def handler_current_node(self, current):
params_result = self.get_params_result()
# service = get_safe_data(self.get_service_name(), current)
from collect.service_imp.common.filters.template_tool import TemplateTool
template_tool = TemplateTool(op_user=self.op_user)
service = self.get_node_service(current, params_result, template_tool)
if not self.is_success(service):
return service
service = self.get_data(service)
service_result = self.get_service_result(service, self.template)
if not self.is_success(service_result):
return service_result
data = self.get_data(service_result)
return self.success(data)
def execute(self, handler_node):
self.set_flow(self.get_flow())
        # the flow must include a "service" node
self.set_must_node_names([self.get_service_name()])
self.set_flow_name(self.get_flow_name())
flow_result = self.flow(handler_node)
return flow_result
def result(self, params):
flow = self.get_flow()
if not flow:
            return self.fail(msg="Could not find the " + self.get_flow_name() + " node, please check the configuration")
flow_result = self.execute(self.handler_current_node)
if not self.is_success(flow_result):
return flow_result
return self.success({})
|
"""Initializes the mongodb database"""
from whist.server.const import DATABASE_NAME
from whist.server.database.connection import get_database
db = get_database(DATABASE_NAME)
|
#
# Copyright (C) 2016 Codethink Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Tristan Van Berkom <[email protected]>
# Plugin author facing APIs
import os
if "_BST_COMPLETION" not in os.environ:
# Special sauce to get the version from versioneer
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from .utils import UtilError, ProgramNotFoundError
from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
from .storage import Directory
from .types import CoreWarnings, OverlapAction
from .node import MappingNode, Node, ProvenanceInformation, ScalarNode, SequenceNode
from .plugin import Plugin
from .source import Source, SourceError, SourceFetcher
from .downloadablefilesource import DownloadableFileSource
from .element import Element, ElementError, DependencyConfiguration
from .buildelement import BuildElement
from .scriptelement import ScriptElement
# XXX We are exposing a private member here as we expect it to move to a
# separate package soon. See the following discussion for more details:
# https://gitlab.com/BuildStream/buildstream/issues/739#note_124819869
from ._gitsourcebase import _GitSourceBase, _GitMirror
|
# This is an importable rather than a standalone html file
# for a couple of reasons, not least of those being
# that as soon as we take the step toward using file system
# resources, it makes packaging more complex ...
# The other reasons can be summarized as "laziness".
FIRELOGGER_HREF = "https://addons.mozilla.org/en-US/firefox/addon/11090"
FIREPYTHON_BASE_HREF = "http://firepython.binaryage.com"
BODY_HEADER = """\
<div id="header">
<div class="container">
<div class="header-left span-8">
<a href="http://www.binaryage.com"
title="Binary Age"><div class="header-logo"></div></a>
<a href="http://twitter.com/binaryage"><div
class="twitter" title="Follow us on Twitter"></div></a>
</div>
</div>
</div>
"""
BODY = """\
<!DOCTYPE html>
<html>
<head>
<title>FirePython demo app</title>
<link rel="stylesheet" href="__BASE__/shared/css/screen.css"
type="text/css" media="screen, projection">
<link rel="stylesheet" href="__BASE__/shared/css/print.css"
type="text/css" media="print">
<!--[if lt IE 8]>
<link rel="stylesheet"
href="__BASE__/shared/css/ie.css" type="text/css"
media="screen, projection">
<![endif]-->
<link rel="stylesheet" href="__BASE__/shared/css/site.css" type="text/css">
</head>
<body>
__BODY_HEADER__
<div id='site'>
<div class='container'>
<div class='main-left span-12'>
<div class="logo">
<img src="__BASE__/shared/img/firepython-icon.png"
width="32" height="32"/>
<h1>FirePython</h1>
</div>
<h2 id='instructions-header'>welcome to the FirePython demo app!</h2>
<p id='instructions'>
Make sure you have
<a href="__FIRELOGGER_HREF__">firelogger</a> installed,
then hit <a href="/BORK?error=%(error)s">this link</a>
or any other request containing 'error' in the
<strong>QUERY_STRING</strong> to see some output in
the firebug <strong>Logger</strong> panel.
</p>
<h2 id='environ-header'><abbr
title='partial environ, that is'>environ:</abbr></h2>
<pre id='environ'>%(environ)s</pre>
</div>
</div>
</div>
</body>
</html>
"""
# poor man's templating, ftw!
REPLACEMENTS = (
('__FIRELOGGER_HREF__', FIRELOGGER_HREF),
('__BODY_HEADER__', BODY_HEADER),
('__BASE__', FIREPYTHON_BASE_HREF), # this one *last*
)
for old, new in REPLACEMENTS:
BODY = BODY.replace(old, new)
del old, new
EXCLAMATIONS = (
"'bye", "'dswounds", "'sblood", "'sdeath", "'sfoot", "'struth",
"'zackly", "'zactly", '10-4', 'AIUI', 'Abyssinia', 'BFD',
'Baruch HaShem', 'Bueller', 'CBF', 'Christ', 'Christ alive',
'Christ almighty', 'Deo volente', 'F off', 'FTMFW', 'FTW', 'G2G',
'GDGD', 'GIYF', 'GTH', 'God Almighty', 'God Save the King',
'God Save the Queen', 'God bless you', 'God damn',
'God in heaven', 'God willing', 'Goddy', 'Godspeed',
'Gordon Bennett', 'HTH', 'Happy Thanksgiving', 'Hell no',
'Hell yeah', 'Holy Mother', 'Holy Mother of God', "I don't think",
'I never did', 'I say', 'I should coco', 'I should cocoa', "I'll be",
"I'll drink to that", "I'll say", 'JFGI', 'JSYK', 'Janey Mack',
'Jeebus', 'Jeezum Crow', 'Jeremiah', 'Jesum Crow', 'Jesus',
'Jesus Christ', 'Jesus H. Christ', 'Jesus Harold Christ',
'Judas Priest', 'LOL', 'Lord be praised', 'Lord love a duck',
'Lord willing', 'MTFBWY', 'NVRM', 'O', 'OK', 'OKDK', 'OMGWTFBBQ',
'P U', "Qapla'", 'ROTFLMAO', 'ReHi', 'Selah', 'Sieg Heil', 'TT4N',
'XD', 'ZOMFG', 'ZOMG', '^H', '^W', 'a', "a'ight", "a'right", 'aah',
'aargh', 'aarrghh', 'about face', 'about sledge', 'abracadabra',
'abso-fucking-lutely', 'absolutely', 'achoo', 'ack', 'action',
'adieu', 'adios', 'agreed', 'ah', 'ah-choo', 'aha', 'ahchoo', 'ahem',
'ahh', 'ahoy', 'ahoy-hoy', 'ai', 'ai yah', 'alack', 'alakazam', 'alas',
'alley oop', 'allrighty', 'alreet', 'alrighty', 'amen', 'amidships',
'and the horse you rode in on', 'applesauce',
'arf', 'argh', 'arr', 'arrah now', 'as if', 'as you like',
'as you wish', 'astaghfirullah',
'atchoo', 'atishoo', 'attaboy', 'attagirl', 'au revoir', 'avast',
'aw', 'aw shucks',
'aweel', 'aww', 'ay', 'ay, chihuahua', 'aye', 'aye man',
'ba da bing ba da boom',
'bababadalgharaghtakamminarronnkonnbronntonnerronntuonnthunntrovar'
'rhounawnskawntoohoohoordenenthurnuk', 'baccare', 'bad luck',
'bada bing', 'bada bing bada boom', 'bada bing, bada boom', 'bada boom',
'bada boom bada bing', 'bah', 'bam', 'banzai', 'bastard', 'batter up',
'battle stations', 'beauty',
'because', 'begad', 'begorra', 'begorrah', 'bejeezus', 'bejesus',
'big deal', 'big whoop',
'big wow', 'bingo', 'bish bash bosh', 'blah', 'blah blah blah', 'bleah',
'blech', 'bleeding heck',
'bleeding hell', 'bleh', 'bless you', 'blimey', "blimey O'Reilly",
"blimey O'Riley", 'blood and tommy', 'bloody Nora',
'blooming heck', 'blooming hell', 'blow this for a game of soldiers',
'bog off', 'bollocks', 'bon voyage', 'boo', 'boo hoo',
'boom', 'booyah', 'booyakasha', 'bosh', 'bostin', 'bother', 'bottoms up',
'boutye',
)
# vim:filetype=html
|
from flask import Flask
APP_VERSION = "0.0.1"
app = Flask(__name__)
@app.route("/")
def home():
return ""
@app.route("/ping")
def ping():
return {
"version": APP_VERSION
}
if __name__ == "__main__":
app.run()
|
import pickle, os
from PyQt4 import QtGui, QtCore
class Settings():
SETTINGS_FILE = None
THEME_DIR = None
def __init__(self):
self.__theme = '-'
self.__animation_duration = 500
self.__is_fulscreen = False
self.__full_deck = False
self.load()
self.load_resources()
def save(self):
f = open(Settings.SETTINGS_FILE, 'w')
pickle.dump(
{'theme': self.__theme,
'fs': self.__is_fulscreen,
'full': self.__full_deck
}, f)
f.close()
def load(self):
try:
f = open(Settings.SETTINGS_FILE, 'r')
tmp = pickle.load(f)
f.close()
if 'theme' in tmp: self.__theme = tmp['theme']
if 'fs' in tmp: self.__is_fulscreen = tmp['fs']
if 'full' in tmp: self.__full_deck = tmp['full']
except IOError:
pass
if not QtCore.QDir(os.path.join(Settings.THEME_DIR, self.__theme)).exists():
themesdir = QtCore.QDir(Settings.THEME_DIR)
themes = themesdir.entryList(filters=QtCore.QDir.AllDirs)
themes.removeAt(themes.indexOf('.'))
themes.removeAt(themes.indexOf('..'))
if len(themes) >= 1:
self.__theme = str(themes[0])
else:
print 'Error'
print 'No themes were found in: ', Settings.THEME_DIR
raise IOError('No theme files')
def setTheme(self, theme):
self.__theme = str(theme)
def getTheme(self):
return self.__theme
def getAnimationDuration(self):
return self.__animation_duration
def setAnimationDuration(self, ms):
self.__animation_duration = ms
def isFullscreen(self):
return self.__is_fulscreen
def setFullscreen(self, fs):
self.__is_fulscreen = fs
def toggleFullscreen(self):
self.__is_fulscreen = not self.__is_fulscreen
return self.isFullscreen()
def isFullDeck(self):
return self.__full_deck
def setFullDeck(self, full):
self.__full_deck = full
def load_resources(self):
self.resources = {'playing_cards_back': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'playing_cards_back.png')),
'playing_cards': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'playing_cards.png')),
'playing_symbols': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'symbols.png')),
'background': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'background.png')),
'seats': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'seats.png')),
'seats_large': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'seats_large.png')),
'table': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'table.png')),
'stack': QtGui.QImage(os.path.join(Settings.THEME_DIR, self.__theme, 'stack.png'))
}
self.colors = {'font': QtGui.QColor(50, 29, 17),
'font disabled': QtGui.QColor(102, 60, 35),
'button': QtGui.QColor(149, 96, 56),
'button highlight': QtGui.QColor(124, 73, 43),
'button blink': QtGui.QColor(255, 200, 0)
}
try:
f = open(os.path.join(Settings.THEME_DIR, self.__theme, 'colors'), 'r')
self.colors.update(pickle.load(f))
f.close()
except:
pass
|
VALUE = 1 / 0
|
import numpy as np
def rotx(theta):
rot = np.array([[1.0, 0, 0], [0, 1, 0], [0, 0, 1]])
rot[1, 1] = np.cos(theta)
rot[2, 1] = np.sin(theta)
rot[1, 2] = -np.sin(theta)
rot[2, 2] = np.cos(theta)
return rot
def roty(theta):
rot = np.array([[1.0, 0, 0], [0, 1, 0], [0, 0, 1]])
rot[2, 2] = np.cos(theta)
rot[0, 2] = np.sin(theta)
rot[2, 0] = -np.sin(theta)
rot[0, 0] = np.cos(theta)
return rot
def rotz(theta):
rot = np.array([[1.0, 0, 0], [0, 1, 0], [0, 0, 1]])
rot[0, 0] = np.cos(theta)
rot[1, 0] = np.sin(theta)
rot[0, 1] = -np.sin(theta)
rot[1, 1] = np.cos(theta)
return rot
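# --- Hedged sanity check (added for illustration; not part of the original module):
# each rotation matrix should be orthogonal with determinant 1, and rotz(pi/2)
# should map the x-axis onto the y-axis.
if __name__ == "__main__":
    theta = np.pi / 2
    for rot in (rotx(theta), roty(theta), rotz(theta)):
        assert np.allclose(rot @ rot.T, np.eye(3))   # orthogonality
        assert np.isclose(np.linalg.det(rot), 1.0)   # proper rotation
    assert np.allclose(rotz(theta) @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])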
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 6
# Methods Covered : 6
# Examples Total : 6
# Examples Tested : 6
# Coverage % : 100
# ----------------------
# nat_gateways: 6/6
import unittest
import azure.mgmt.network
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
import pytest
AZURE_LOCATION = 'eastus'
@pytest.mark.live_test_only
class TestMgmtNetwork(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
def create_public_ip_addresses(self, group_name, location, public_ip_name):
# Create PublicIP
BODY = {
'location': location,
'public_ip_allocation_method': 'Static',
'idle_timeout_in_minutes': 4,
'sku': {
'name': 'Standard'
}
}
result = self.mgmt_client.public_ip_addresses.begin_create_or_update(
group_name,
public_ip_name,
BODY
)
return result.result()
def create_public_ip_prefixes(self, group_name, location, public_ip_prefix_name):
# Create public IP prefix defaults[put]
BODY = {
"location": location,
"prefix_length": "30",
"sku": {
"name": "Standard"
}
}
result = self.mgmt_client.public_ip_prefixes.begin_create_or_update(group_name, public_ip_prefix_name, BODY)
return result.result()
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
@recorded_by_proxy
def test_network(self, resource_group):
SUBSCRIPTION_ID = self.get_settings_value("SUBSCRIPTION_ID")
RESOURCE_GROUP = resource_group.name
NAT_GATEWAY_NAME = "myNatGateway"
PUBLIC_IP_ADDRESS_NAME = "publicipaddress"
PUBLIC_IP_PREFIX_NAME = "publicipprefix"
self.create_public_ip_addresses(RESOURCE_GROUP, AZURE_LOCATION, PUBLIC_IP_ADDRESS_NAME)
self.create_public_ip_prefixes(RESOURCE_GROUP, AZURE_LOCATION, PUBLIC_IP_PREFIX_NAME)
# /NatGateways/put/Create nat gateway[put]
BODY = {
"location": "eastus",
"sku": {
"name": "Standard"
},
"public_ip_addresses": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/publicIPAddresses/" + PUBLIC_IP_ADDRESS_NAME
}
],
"public_ip_prefixes": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/publicIPPrefixes/" + PUBLIC_IP_PREFIX_NAME
}
]
}
result = self.mgmt_client.nat_gateways.begin_create_or_update(resource_group_name=RESOURCE_GROUP, nat_gateway_name=NAT_GATEWAY_NAME, parameters=BODY)
result = result.result()
# /NatGateways/get/Get nat gateway[get]
result = self.mgmt_client.nat_gateways.get(resource_group_name=RESOURCE_GROUP, nat_gateway_name=NAT_GATEWAY_NAME)
# /NatGateways/get/List nat gateways in resource group[get]
result = self.mgmt_client.nat_gateways.list(resource_group_name=RESOURCE_GROUP)
# /NatGateways/get/List all nat gateways[get]
result = self.mgmt_client.nat_gateways.list_all()
# /NatGateways/patch/Update nat gateway tags[patch]
BODY = {
"tags": {
"tag1": "value1",
"tag2": "value2"
}
}
result = self.mgmt_client.nat_gateways.update_tags(resource_group_name=RESOURCE_GROUP, nat_gateway_name=NAT_GATEWAY_NAME, parameters=BODY)
# /NatGateways/delete/Delete nat gateway[delete]
result = self.mgmt_client.nat_gateways.begin_delete(resource_group_name=RESOURCE_GROUP, nat_gateway_name=NAT_GATEWAY_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
__version_info__ = ('0', '0', '6')
__version__ = '.'.join(__version_info__)
name = "revops"
|
import os
# DATABASE SETTINGS
SQLALCHEMY_DATABASE_URI = 'sqlite:////{}/laborapp.db'.format(os.getcwd())
SECRET_KEY = "withgreatpowercomesgreatresponsibility"
DEBUG = True
# mail settings
MAIL_SERVER = 'smtp.example.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
# Flask Security Settings
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SECURITY_SEND_PASSWORD_CHANGE_EMAIL = False
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = False
SECURITY_LOGIN_WITHOUT_CONFIRMATION = True
SECURITY_POST_LOGIN_VIEW = "/user/dashboard"
SECURITY_POST_REGISTER_VIEW = "/user/dashboard"
SKILLS = ['PHP', 'Java', 'Ruby', 'C#', 'C++', 'Bash', 'SQL', 'CSS', 'HTML', 'QA', 'Node', 'Flask','JavaScript']
DEBUG_TB_INTERCEPT_REDIRECTS = False
|
#!/usr/bin/env python
from time import strftime, gmtime
def now():
return strftime('%Y-%m-%d %H:%M:%S', gmtime())
def number(value):
return float(value.replace('lei', '').replace('.', '').replace(',', '.'))
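# --- Hedged example (added for illustration; assumes Romanian-style price strings
# such as "1.234,56 lei", which is what the replacements above suggest): the
# thousands dot is dropped and the decimal comma becomes a dot.
if __name__ == "__main__":
    assert number('1.234,56 lei') == 1234.56
    print(now(), '->', number('1.234,56 lei'))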
|
"""Tests for the math.klobuchar-module
"""
from midgard.ionosphere import klobuchar
def test_klobuchar():
    # Comparisons are done against the GPS-Toolbox Klobuchar programs from Ola Ovstedal
# https://www.ngs.noaa.gov/gps-toolbox/ovstedal.htm
t = 593100
ion_coeffs = [
0.382e-07,
0.149e-07,
-0.179e-06,
0.0, # alpha coefficients
0.143e06,
0.0,
-0.328e06,
0.113e06, # beta coefficients
]
rec_pos = [0.698131701, 4.53785606, 0.0]
az = 3.66519143
el = 0.34906585
freq_l1 = 1575420000.0
freq = 1575420000.0
# +gLAB validation test
# PRN15, epoch
# -input:obs /home/dahmic/where/data/gnss/obs/2018/032/stas0320.18o
# -input:nav /home/dahmic/where/data/gnss/orb/brdc/2018/032/brdm0320.18p (added ionosphere parameters)
# -input:dcb /home/dahmic/where/data/gnss/orb/brdc/2018/032/brdm0320.18p
# t = 432000.0
# ion_coeffs = [8.381900e-09, -7.450600e-09, -5.960500e-08, 5.960500e-08, 8.806400e+04, -3.276800e+04, -1.966100e+05, 1.966100e+05]
# rec_pos = [3275753.912000, 321110.865100, 5445041.882900]
# az = 0.159409
# el = 1.171217
# freq_l1 = 1575420000.0
# freq = 1575420000.0
# -gLAB validation test
delay, _ = klobuchar.klobuchar(t, ion_coeffs, rec_pos, az, el, freq_l1, freq)
# expected: delay = 23.784 m
expected = 23.784
assert abs(delay - expected) < 1e-3
|
#!/usr/bin/env python
from pandas import DataFrame, concat, melt, Series
import re
from numpy import where
from collections import OrderedDict
from itertools import chain
__all__ = [
'string_to_list', 'strip_time', 'year_strip', 'extract',
'check_int', 'produce_null_df', 'check_registration',
'UniqueReplace', 'updated_df_values', 'write_column_to_log',
'wide_to_long', 'split_column', 'cbind'
]
def cbind(dataframe, column1, column2):
try:
column1 = int(column1)
column2 = int(column2)
except Exception as e:
print(str(e))
new_column = (
dataframe[column1].astype(str) +
' ' +
dataframe[column2].fillna('NULL').apply(
            lambda x: '' if x == 'NULL' else str(x))).apply(
lambda y: y.strip())
return concat([dataframe, new_column], axis=1)
def split_column(dataframe, column, separator):
splitdf = dataframe[
column].apply(
lambda x: Series([i for i in x.split(separator)]))
return concat([dataframe, splitdf], axis=1)
def wide_to_long(dataframe, value_columns, value_column_name):
all_columns = dataframe.columns.values.tolist()
id_columns = [x for x in all_columns if x not in value_columns]
melted_df = melt(
dataframe,
value_vars=value_columns, var_name='species_column',
id_vars=id_columns, value_name=str(value_column_name)
)
return melted_df
def string_to_list(userinput):
'''
Function to take a string with names separated by
commas and turn that into a list of names
'''
strtolist = [
x.strip() for x in re.split(',', userinput)
]
return strtolist
def strip_time(data, col):
'''
Method to remove delimiter from date time data columns
'''
strippedlist = []
for i in list(set(col)):
strippedlist.append([
re.sub("/|,|-|;"," ", x) for x in list(
data[i].astype(str))])
return strippedlist
def year_strip(dateformat):
'''
Takes a string specifying a date format and then extracts the
year format for help with processing.----DELETE?
'''
f = dateformat
found = re.search('Y+', f)
ylength = len(found.group(0))
return ylength
def extract(d,keys):
''' returns subset of dictionary based on list of keys'''
return OrderedDict((k, d[k]) for k in d if k in keys)
def check_int(x):
''' function to check if text can be converted to integer'''
try:
int(x)
return True
except ValueError:
return False
def produce_null_df(ncols, colnames, dflength, nullvalue):
'''
Helper function to create a dataframe of null
    values for concatenating with formatted data
'''
try:
list(colnames)
int(ncols)
int(dflength)
str(nullvalue)
except Exception as e:
print(str(e))
        raise ValueError('Invalid data types for arguments')
p = re.compile('\w+\s')
matches = p.match(nullvalue)
if matches is None:
nullvalue = (nullvalue + ' ')
allnulls = concat(
[DataFrame(
re.sub(' ', ' ', (str(nullvalue)*dflength)).split())]*
len(colnames), axis=1)
allnulls.columns = colnames
return allnulls.copy()
def check_registration(clss, inputname):
'''
    Helper function to make sure the input handler was registered
with the facade class before trying operations.
ONLY FOR USE IN FACADE CLASS!!!!!!!
'''
try:
assert clss._inputs[inputname].name == inputname
except Exception as e:
print(str(e))
raise ValueError('Input Not Registered')
class UniqueReplace(object):
'''
Class to perform the work of returning a dataframe with unique
combinations of factors from 'x' number of columns
'''
def __init__(self, dataframe, clsinstance):
self._data = dataframe.copy()
self.userinput = clsinstance
self.lookup = list(clsinstance.lnedentry.values())
self.levels = None
self.original = None
self.modified = None
def get_levels(self):
'''
Returns pandas dataframe with unique combination of
levels
'''
try:
self.levels = self._data[
self.lookup].drop_duplicates().sort_values(
self.lookup).reset_index(drop=True)
return self.levels
except Exception as e:
print(str(e))
raise LookupError('Invalid column names')
def replace_levels(
self, modifiedlevelname, allotherlevels=None):
'''
Takes a modified list of factor level labels and converts
the original labels in the dataframe into
the modified labels.
'''
try:
assert len(self.lookup) == 1
except Exception as e:
print(str(e))
raise AssertionError(
'To replace values input only one column' +
' name.')
self.modified = modifiedlevelname
self.original = self._data[self.lookup].drop_duplicates()
og_list = self.original[self.lookup].values.tolist()
if any(isinstance(i, list) for i in og_list):
og_list = list(chain.from_iterable(og_list))
else:
pass
level_name_changed_from = [
x for x in og_list if x not in allotherlevels]
print(level_name_changed_from, self.modified)
try:
assert (
len(self.modified) == len(level_name_changed_from))
self._data = self._data.replace(
{self.lookup[0]: {
level_name_changed_from[0]: self.modified[0]}},
)
return self._data
except Exception as e:
print(str(e))
raise AttributeError('Too many levels to replace')
return self._data
def replace_values(self):
'''
takes a list of values to change
'''
try:
if check_int(
self.userinput.lnedentry['from']) is True:
modified = self._data.replace(
int(self.userinput.lnedentry['from']),
self.userinput.lnedentry['to'])
return modified
else:
pass
except Exception as e:
print(str(e))
raise LookupError('InputHandler key error')
finally:
modified = self._data.replace(
self.userinput.lnedentry['from'],
self.userinput.lnedentry['to'])
return modified
def updated_df_values(olddataframe,newdataframe,logger, name):
'''
    Helper function to aid in logging the differences between
    dataframes after the user has modified the entries.
    For example, inputting latitude and longitude for the site
table or the extent of spatial replication in the main table.
Arguments:
olddataframe = An unmodified dataframe
newdataframe = A user modified dataframe
logger = An instance of a logger handler
    name = A string with the name to append to the log
'''
try:
assert (
olddataframe.columns.values.tolist() ==
newdataframe.columns.values.tolist()) is True
except Exception as e:
print(str(e))
raise AttributeError(
'Dataframe columns are not equivalent')
diffdf = (olddataframe != newdataframe)
if (len(olddataframe) == 0 or
olddataframe is None or
len(olddataframe.columns) == 0):
logger.info('{} "{}"'.format(
name,
'NULL'))
else:
diffdf = (olddataframe != newdataframe)
for i,item in enumerate(diffdf.columns):
if any(diffdf[item].values.tolist()):
index = where(diffdf[item].values)[0].tolist()
logger.info('{} "{}" = {} to {}'.format(
name,
item,
olddataframe.loc[index,item].values.tolist(),
newdataframe.loc[index,item].values.tolist()))
else:
pass
def write_column_to_log(dictionary, logger, tablename):
'''
    Function to take a dictionary of column headers/entries,
    turn it into a dataframe, and log the entries under
    the given table name.
'''
coldf = DataFrame([dictionary])
nulldf = produce_null_df(
len(coldf.values.tolist()),
coldf.columns.values.tolist(),
len(coldf),
'NULL'
)
updated_df_values(
nulldf, coldf, logger, tablename
)
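# --- Hedged usage sketch (added for illustration; the dictionary, logger name and
# table name below are hypothetical): write_column_to_log builds a one-row frame
# from the dict, pairs it with a matching 'NULL' frame from produce_null_df, and
# logs every entry as a change from 'NULL'.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    write_column_to_log(
        {'latitude': 45.1, 'longitude': -122.6},
        logging.getLogger('example'),
        'site_table')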
|
# encoding: utf-8
# module System.Windows.Documents calls itself Documents
# from PresentationFramework,Version=4.0.0.0,Culture=neutral,PublicKeyToken=31bf3856ad364e35,PresentationCore,Version=4.0.0.0,Culture=neutral,PublicKeyToken=31bf3856ad364e35
# by generator 1.145
# no doc
# no imports
# no functions
# classes
from __init___parts.Adorner import Adorner
from __init___parts.AdornerDecorator import AdornerDecorator
from __init___parts.AdornerLayer import AdornerLayer
from __init___parts.TextElement import TextElement
from __init___parts.Inline import Inline
from __init___parts.AnchoredBlock import AnchoredBlock
from __init___parts.Block import Block
from __init___parts.BlockCollection import BlockCollection
from __init___parts.BlockUIContainer import BlockUIContainer
from __init___parts.Span import Span
from __init___parts.Bold import Bold
from __init___parts.ContentPosition import ContentPosition
from __init___parts.DocumentPage import DocumentPage
from __init___parts.DocumentPaginator import DocumentPaginator
from __init___parts.DocumentReference import DocumentReference
from __init___parts.DocumentReferenceCollection import DocumentReferenceCollection
from __init___parts.DynamicDocumentPaginator import DynamicDocumentPaginator
from __init___parts.EditingCommands import EditingCommands
from __init___parts.Figure import Figure
from __init___parts.IDocumentPaginatorSource import IDocumentPaginatorSource
from __init___parts.FixedDocument import FixedDocument
from __init___parts.FixedDocumentSequence import FixedDocumentSequence
from __init___parts.FixedPage import FixedPage
from __init___parts.Floater import Floater
from __init___parts.FlowDocument import FlowDocument
from __init___parts.FrameworkTextComposition import FrameworkTextComposition
from __init___parts.FrameworkRichTextComposition import FrameworkRichTextComposition
from __init___parts.GetPageCompletedEventArgs import GetPageCompletedEventArgs
from __init___parts.GetPageCompletedEventHandler import GetPageCompletedEventHandler
from __init___parts.GetPageNumberCompletedEventArgs import GetPageNumberCompletedEventArgs
from __init___parts.GetPageNumberCompletedEventHandler import GetPageNumberCompletedEventHandler
from __init___parts.GetPageRootCompletedEventArgs import GetPageRootCompletedEventArgs
from __init___parts.GetPageRootCompletedEventHandler import GetPageRootCompletedEventHandler
from __init___parts.Glyphs import Glyphs
from __init___parts.Hyperlink import Hyperlink
from __init___parts.InlineCollection import InlineCollection
from __init___parts.InlineUIContainer import InlineUIContainer
from __init___parts.Italic import Italic
from __init___parts.LineBreak import LineBreak
from __init___parts.LinkTarget import LinkTarget
from __init___parts.LinkTargetCollection import LinkTargetCollection
from __init___parts.List import List
from __init___parts.ListItem import ListItem
from __init___parts.ListItemCollection import ListItemCollection
from __init___parts.LogicalDirection import LogicalDirection
from __init___parts.PageContent import PageContent
from __init___parts.PageContentCollection import PageContentCollection
from __init___parts.PagesChangedEventArgs import PagesChangedEventArgs
from __init___parts.PagesChangedEventHandler import PagesChangedEventHandler
from __init___parts.PaginationProgressEventArgs import PaginationProgressEventArgs
from __init___parts.PaginationProgressEventHandler import PaginationProgressEventHandler
from __init___parts.Paragraph import Paragraph
from __init___parts.Run import Run
from __init___parts.Section import Section
from __init___parts.Table import Table
from __init___parts.TableCell import TableCell
from __init___parts.TableCellCollection import TableCellCollection
from __init___parts.TableColumn import TableColumn
from __init___parts.TableColumnCollection import TableColumnCollection
from __init___parts.TableRow import TableRow
from __init___parts.TableRowCollection import TableRowCollection
from __init___parts.TableRowGroup import TableRowGroup
from __init___parts.TableRowGroupCollection import TableRowGroupCollection
from __init___parts.TextEffectResolver import TextEffectResolver
from __init___parts.TextEffectTarget import TextEffectTarget
from __init___parts.TextElementCollection import TextElementCollection
from __init___parts.TextElementEditingBehaviorAttribute import TextElementEditingBehaviorAttribute
from __init___parts.TextPointer import TextPointer
from __init___parts.TextPointerContext import TextPointerContext
from __init___parts.TextRange import TextRange
from __init___parts.TextSelection import TextSelection
from __init___parts.Typography import Typography
from __init___parts.Underline import Underline
from __init___parts.ZoomPercentageConverter import ZoomPercentageConverter
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, re
def getClassesDict():
f = open('hindiclasses.sorted.txt')
classesDict = dict()
for line in f:
fields = line.split()
classesDict[fields[0]] = fields[1]
f.close()
return classesDict
def renderExample(example):
label = example[0]
features = example[1]
result = label + ' | '
for feature_name in features:
result += feature_name
feature_value = features[feature_name]
if feature_value is not None:
result += ':'+str(feature_value)
result += ' '
return result
def addSuffixFeature(features, word_text, suffix_length):
features['s'+str(suffix_length)+'+'+word_text[-suffix_length:]] = None
def addContextFeatures(features, sentence, pos, window):
for i in range(pos-window, pos+window+1):
        if i != 0:
if i in range(len(sentence)):
features['c'+str(i-pos)+'+'+sentence[i]] = None
else:
features['c'+str(i-pos)+'+'] = None
def addClassContextFeatures(features, sentence, pos, window):
for i in range(pos-window, pos+window+1):
        if i != 0:
if i in range(len(sentence)):
features['v'+str(i-pos)+'+'+str(classesDict.get(sentence[i],-1))] = None
else:
features['v'+str(i-pos)+'+'] = None
def addNgramFeatures(features, word_text, ngram_length):
for i in range(len(word_text) - ngram_length + 1):
key = 'n'+str(ngram_length)+'+'+word_text[i:i+ngram_length]
#print "%d-ngram of %s: %s" % (i, word_text, key)
if key in features:
features[key] = features[key] + 1
else:
features[key] = 1
# main
config_str = sys.argv[3]
config = dict()
for e in config_str.split(','):
parameter, value = e.split('=')
if parameter == 'loss_function':
config[parameter] = value
else:
config[parameter] = int(value)
classesDict = getClassesDict()
print 'Classes dictionary loaded'
labels_present = 1
if len(sys.argv) > 4 and sys.argv[4] == 'no-labels':
labels_present = 0
with open(sys.argv[1]) as in_file, open(sys.argv[2],'w') as out_file:
for line in in_file:
sentence = line[:-1].split(' ')
for i in range(len(sentence)):
word = sentence[i]
if labels_present:
word_text = word[:-2]
label = '1' if word[-1] == '1' else '-1'
else:
word_text = word
label = '-1'
features = dict()
features['t+'+word_text] = None
word_length = len(word_text)
vowel_count = len(re.findall(r'[aąeęioóuy]', word_text))
if word_length > 0:
vowel_rate = vowel_count / float(word_length)
else:
vowel_rate = 0
#features['word_length'] = word_length
#features['vowel_count'] = vowel_count
#features['vowel_rate'] = vowel_rate
            for n in range(1, config['ngram'] + 1):
                addNgramFeatures(features, word_text, n)
for j in range(2,config['suffixes']+1):
addSuffixFeature(features, word_text, j)
addContextFeatures(features, sentence, i, config['context'])
if config['fl']:
features['firstOrLast'] = 1 if (i == 0 or i == len(sentence) -1) else 0
if config['class']:
features['d+'+str(classesDict.get(word_text, -1))] = None
if config['class_context'] > 0:
addClassContextFeatures(features, sentence, i, config['class_context'])
if config['ending']:
features['cvbEnding'] = 1 if (word_text[-1] == 'ī' or word_text[-1] == 'i' or word_text[-1] == 'a') else 0
if config['hyphen']:
features['hyphen'] = 1 if ('-' in word_text) else 0
example = (label, features)
out_file.write(renderExample(example)+'\n')
|
from os import sep
import re
import sys
import logging
import numpy as np
import pandas as pd
from dvc import api
from io import StringIO
import warnings
warnings.filterwarnings('ignore')
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(name)s: %(message)s',
level=logging.INFO,
datefmt='%H:%M:%S',
stream=sys.stderr
)
logger = logging.getLogger(__name__)
logging.info('Fetching data...')
bikes_data_path = api.read('data/raw/bikes.csv', remote='dataset-track')
bikes = pd.read_csv(StringIO(bikes_data_path))
bikes.dropna(inplace=True)
# ignore bikes whose price equals 0
bikes = bikes[bikes['price']>0]
bikes.reset_index(drop=True, inplace=True)
bikes_names = bikes['model_name']
# split the "model_name" column on whitespace
bikes_names = bikes_names.str.replace("Royal Enfield", "Royal-Enfield")
bikes_names = bikes_names.str.split(pat=" ")
# create a "brand_name" feature column
brand = []
model_dirty = []
for i in range(len(bikes_names)):
brand.append(bikes_names.iloc[i][0])
model_dirty.append(bikes_names.iloc[i][1:-1])
brand_name = pd.DataFrame(brand, columns=["brand_name"])
# create two columns, "model_name" and "motor_size"
patron_location = re.compile('(\d{3,})')
motor_size = []
model_name = []
for model in model_dirty:
model_str = " ".join(model[:6])
model_name.append(model_str)
try:
size = patron_location.search(model_str)
motor_size.append(float(size.group(1)))
except:
motor_size.append(np.nan)
model_name_col = pd.DataFrame(model_name, columns=["model_name"])
motor_size_col = pd.DataFrame(motor_size, columns=["motor_size"])
model_name_col.replace("\d{3,}cc", "", regex=True, inplace=True)
name_stract = pd.concat([brand_name, model_name_col, motor_size_col], axis=1)
name_stract['brand_name'].replace("Royal-Enfield\u200e", "Royal-Enfield", inplace=True)
bikes['kms_driven'] = bikes['kms_driven'].replace('[A-Za-z-\s]+', '', regex=True)
kms_driven_list = []
for i in range(len(bikes)):
try:
kms_driven_list.append(float(bikes['kms_driven'].iloc[i]))
except Exception as e:
kms_driven_list.append(np.nan)
kms_driven = pd.DataFrame(kms_driven_list, columns=["kms_driven"])
bikes['mileage'].replace('[\sA-Za-z]+', '', regex=True, inplace=True)
bikes['mileage'].replace('', '0', regex=True, inplace=True)
mileage_list = []
for i in range(len(bikes)):
try:
mileage_list.append(float(bikes['mileage'].iloc[i][:2]))
except:
mileage_list.append(np.nan)
mileage = pd.DataFrame(mileage_list, columns=["mileage"])
bikes['owner'].replace({'first owner': 1, 'second owner': 2, 'third owner': 3, 'fourth owner or more': 4}, inplace=True)
power_col = bikes['power']
power_splited = power_col.str.split(pat=" ")
power_list = []
for i in range(len(bikes)):
try:
power_list.append(power_splited.iloc[i][0])
except:
power_list.append(np.nan)
power = pd.DataFrame(power_list, columns=["power"])
power['power'].replace('[\sA-Za-z]+', '', regex=True, inplace=True)
power['power'].replace('-.*', '', regex=True, inplace=True)
power['power'] = power['power'].astype(float)
bikes['years'] = 2021 - bikes['model_year']
bikes_new = pd.concat([name_stract, bikes['years'], kms_driven, mileage, bikes['owner'], power, bikes['price']], axis=1)
bikes_new.dropna(inplace=True)
bikes_new = bikes_new[bikes_new['kms_driven'] < bikes_new['kms_driven'].quantile(0.975)]
bikes_new = bikes_new[bikes_new['price'] < 3000000]
idx = bikes_new['brand_name'].value_counts()[bikes_new['brand_name'].value_counts() > 2].index.tolist()
bikes_new = bikes_new[bikes_new['brand_name'].isin(idx)]
bikes_new.to_csv('data/processed/bikes_processed.csv', index=False)
logger.info("Data Fetched and prepared...")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import time
import open3d as o3d
import misc3d as m3d
""" numpy implementation of farthest point sampling """
def farthest_point_sampling_numpy(xyz, npoint):
N = xyz.shape[0]
indices = [0] * npoint
distance = np.ones((N,)) * 1e10
farthest = 0
for i in range(npoint):
indices[i] = farthest
centroid = xyz[farthest, :]
dist = np.sum((xyz - centroid)**2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = np.argmax(distance)
return indices
pcd = o3d.io.read_point_cloud('../data/pose_estimation/model/obj.ply')
print('before sampling: {}'.format(pcd))
points = np.asarray(pcd.points)
t0 = time.time()
indices = m3d.preprocessing.farthest_point_sampling(pcd, 1000)
print('time cost for misc3d: {}'.format(time.time() - t0))
sample = pcd.select_by_index(indices)
t0 = time.time()
indices = farthest_point_sampling_numpy(points, 1000)
print('time cost for numpy: {}'.format(time.time() - t0))
sample_numpy = pcd.select_by_index(indices)
vis = o3d.visualization.Visualizer()
vis.create_window("Farest point sampling", 1920, 1200)
m3d.vis.draw_geometry3d(vis, pcd)
m3d.vis.draw_geometry3d(vis, sample, color=(0, 1, 0), size=5)
vis.run()
|
#Modules:
import numpy as np
#Public:
class DataLoader:
    '''
    Class responsible for standardizing the entries of the
    partial polls.
    '''
#Constructor
def __init__(
self,
fonte,
paredao
):
self._fonte = fonte
self._paredao_names = paredao.get_participantes()
self._content = []
#Interface
def set_manual_entries(
self,
num_participante_1,
num_participante_2,
num_participante_3
):
        '''
        Set the entries manually.
        '''
for _ in range(0, num_participante_1):
self._content.append(
self._new_data_entry(
is_participante_1 = True
)
)
for _ in range(0, num_participante_2):
self._content.append(
self._new_data_entry(
is_participante_2 = True
)
)
for _ in range(0, num_participante_3):
self._content.append(
self._new_data_entry(
is_participante_3 = True
)
)
def get(self, max_samples = 0):
        '''
        Return the data from a source. A maximum number of
        samples can optionally be set.
        '''
if max_samples:
return list(np.random.choice(self._content, max_samples))
else:
return self._content
#Implementation
def _new_data_entry(
self,
is_participante_1 = False,
is_participante_2 = False,
is_participante_3 = False
):
return {
self._paredao_names[0] : is_participante_1,
self._paredao_names[1] : is_participante_2,
self._paredao_names[2] : is_participante_3,
'Fonte' : self._fonte
}
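# --- Hedged usage sketch (added for illustration; the real "paredao" object is not
# shown in this file, so a minimal stand-in exposing get_participantes() is assumed).
if __name__ == "__main__":
    class _FakeParedao:
        def get_participantes(self):
            return ["A", "B", "C"]
    loader = DataLoader(fonte="poll source", paredao=_FakeParedao())
    loader.set_manual_entries(2, 1, 1)   # 2 entries for A, 1 for B, 1 for C
    print(len(loader.get()))             # 4
    print(loader.get(max_samples=2))     # random subset of 2 entries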
|
import numpy as np
import scipy as sp
from VyPy import tools
from VyPy.exceptions import EvaluationFailure
from VyPy.tools import atleast_2d
class Inference(object):
def __init__(self,Kernel):
self.Kernel = Kernel
self.Train = Kernel.Train
return
def __call__(self,XI):
return self.predict(XI)
def precalc(self):
return
def predict(self,XI):
''' Evaluate GPR model at XI
'''
raise NotImplementedError
# unpack
Kernel = self.Kernel
XI = atleast_2d(XI)
# process
## CODE
# results
YI_solve = 0 # predicted output
CovI_solve = 0 # predicted covariance
# pack up outputs
try:
data = Kernel.pack_outputs(XI,YI_solve,CovI_solve)
except NotImplementedError:
data = [YI_solve,CovI_solve]
return data
#: def predict()
|
import socket
from typing import Tuple, Optional
from collections import deque
from net_test_tools.dataset import preprocess
from net_test_tools.transceiver import Transceiver
from net_test_tools.utils import Multiset, hex_str
def receive_for_dataset(
local: Tuple[str, int],
remote: Optional[Tuple[str, int]],
path: str,
*,
timeout: Optional[float] = None,
repeat: int = 1,
post_process: bool = False
) -> Multiset:
"""
    Receive the expected data in the dataset and return the subset of packets that were not received.
    :param local:
    :param remote: None for any address.
    :param path:
    :param timeout: if None, the socket is in blocking mode; otherwise it is non-blocking with the given timeout
:param repeat:
:param post_process: whether to process after reception is complete
:return:
"""
dataset: Multiset = preprocess(path, repeat)
return receive_for(local, remote, dataset, post_process=post_process, timeout=timeout)
def receive_for(
local: Tuple[str, int],
remote: Optional[Tuple[str, int]],
dataset: Optional[Multiset],
*,
post_process: bool,
timeout: Optional[float]
):
"""
:param local:
:param remote: if None, it will receive from any address.
:param dataset: if None, it will receive for any data.
:param post_process:
:param timeout:
:return:
"""
transport = Transceiver(local)
if timeout == 0:
raise ValueError("timeout cannot be 0.")
transport.set_timeout(timeout)
forever = True if dataset is None else False
count_send = len(dataset) if not forever else 1
count_recv = 0
count_correct = 0
buffer = deque()
if forever:
def process(packet: bytes):
print(f"receive: {hex_str(packet)}")
else:
def process(packet: bytes):
nonlocal count_correct
if packet not in dataset:
print(f"receive but incorrect: {hex_str(packet)}")
else:
print(f"receive: {hex_str(packet)}")
dataset.remove(packet)
count_correct += 1
recv_rate = count_recv / count_send * 100 # may > 100% when lots of incorrect packets are received.
correct_rate = count_correct / count_recv * 100
print(f"recv = {recv_rate:.2f} %, correct = {correct_rate:.2f} %")
while count_recv < count_send:
try:
p_recv = transport.recv(remote)
count_recv += 1
if forever:
count_send += 1
if post_process:
buffer.append(p_recv)
else:
process(p_recv)
except socket.timeout:
print(f"receive timeout")
break
if post_process:
while len(buffer) > 0:
process(buffer.popleft())
if not forever:
loss_rate = (count_send - count_correct) / count_send * 100
print(f"packet send = {count_send}, packet received = {count_recv}, "
f"correct packet = {count_correct}, loss = {loss_rate:.2f} %")
return dataset
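# --- Hedged usage sketch (added for illustration; the address, port and dataset
# path below are placeholders, not values from the original project).
if __name__ == "__main__":
    missing = receive_for_dataset(
        local=("127.0.0.1", 9000),
        remote=None,            # accept packets from any sender
        path="dataset.txt",     # hypothetical dataset file
        timeout=5.0,
        repeat=1,
        post_process=False,
    )
    print(f"{len(missing)} expected packets were never received")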
|
import struct
import wtforms
from wtforms.validators import Length, NumberRange
from . import core
class BasicBinaryField(core.BinaryField):
# Some BinaryFields will have inherent value restrictions, based on the
# limitations of the serialized form. For example, a UInt8Field cannot
# store numbers above 0xFF. When the class is instantiated, these
# validators will be silently added to any validators provided by the
# constructor.
initial_validators = []
def __init__(self, label='', validators=None, order=None, **kwargs):
core.BinaryItem.__init__(self)
self.size = struct.calcsize(self.pack_string)
self.order = order
# Clone the initial_validators list to avoid mutating a class
# variable.
all_vldtrs = list(self.initial_validators)
if validators is not None:
all_vldtrs.extend(validators)
self.form_field = self.form_field_class(label, all_vldtrs, **kwargs)
def pack(self, data, order=None):
order = self.order or order or ''
return self.pack_data(data, order)
def pack_data(self, data, order):
return struct.pack(order + self.pack_string, data)
def unpack(self, buffer, order=None):
order = self.order or order or ''
return self.unpack_data(buffer, order)
def unpack_data(self, buffer, order):
return struct.unpack(order + self.pack_string, buffer)[0]
class CharField(BasicBinaryField):
"""
Store a single byte as a one-character ``str`` (in Python 2) or ``bytes``
object (in Python 3).
Attributes:
size: always ``1``
form_field: A :class:`wtforms.fields.StringField` instance.
"""
form_field_class = wtforms.StringField
initial_validators = [Length(min=1, max=1)]
pack_string = 'c'
class BinaryBooleanField(BasicBinaryField):
"""
Store either ``True`` or ``False`` as ``b'\\x01'`` or ``b'\\x00'``
(respectively).
Attributes:
size: always ``1``
form_field: A :class:`wtforms.fields.BooleanField` instance.
"""
form_field_class = wtforms.BooleanField
pack_string = '?'
class BinaryIntegerField(BasicBinaryField):
"""
This class should not be instantiated directly; instead, you should use
one of its subclasses, which determine what kind of int is stored, and
how. Those subclasses are:
==================== ==== =============== ================
Name size Min Max
==================== ==== =============== ================
:class:`Int8Field` 1 -128 127
:class:`UInt8Field` 1 0 255
:class:`Int16Field` 2 -32768 32767
:class:`UInt16Field` 2 0 65535
:class:`Int32Field` 4 -2\ :sup:`31` 2\ :sup:`31` - 1
:class:`UInt32Field` 4 0 2\ :sup:`32` - 1
:class:`Int64Field` 8 -2\ :sup:`63` 2\ :sup:`63` - 1
:class:`UInt64Field` 8 0 2\ :sup:`64` - 1
==================== ==== =============== ================
Attributes:
form_field: A :class:`wtforms.fields.Integerfield` instance.
"""
form_field_class = wtforms.IntegerField
@property
def initial_validators(self):
return [NumberRange(self.min, self.max)]
class Int8Field(BinaryIntegerField):
pack_string = 'b'
min = -128
max = 127
class UInt8Field(BinaryIntegerField):
pack_string = 'B'
min = 0
max = (2 ** 8) - 1
class Int16Field(BinaryIntegerField):
pack_string = 'h'
min = -(2 ** 15)
max = (2 ** 15) - 1
class UInt16Field(BinaryIntegerField):
pack_string = 'H'
min = 0
max = (2 ** 16) - 1
class Int32Field(BinaryIntegerField):
pack_string = 'i'
min = -(2 ** 31)
max = (2 ** 31) - 1
class UInt32Field(BinaryIntegerField):
pack_string = 'I'
min = 0
max = (2 ** 32) - 1
class Int64Field(BinaryIntegerField):
pack_string = 'q'
min = -(2 ** 63)
max = (2 ** 63) - 1
class UInt64Field(BinaryIntegerField):
pack_string = 'Q'
min = 0
max = (2 ** 64) - 1
class Float32Field(BasicBinaryField):
"""
Store a ``float`` in four bytes.
Attributes:
size: Always ``4``.
form_field: A :class:`wtforms.fields.FloatField` instance.
"""
form_field_class = wtforms.FloatField
pack_string = 'f'
class Float64Field(BasicBinaryField):
"""
Store a ``float`` in eight bytes.
Attributes:
size: Always ``8``.
form_field: A :class:`wtforms.fields.FloatField` instance.
"""
form_field_class = wtforms.FloatField
pack_string = 'd'
class BytesField(BasicBinaryField):
"""
Store *N* bytes.
Attributes:
max_length: Maximum number of bytes in the stored string. Note that
this may not be equal to :attr:`size`.
size: The :attr:`size` of a :class:`BytesField` with ``max_length``
*N* varies based on the *length* argument used to construct it.
If *length* is :attr:`~minform.FIXED` or
:attr:`~minform.AUTOMATIC`, ``size`` will be *N*.
If *length* is :attr:`~minform.EXPLICIT`, there will be one or
more extra bytes at the beginning of the packed data, which store
the number of bytes used by the string. This will be the smallest
number of bytes needed to store a number up to ``max_length``. So,
``size`` can be *N+1*, *N+2*, *N+4*, or *N+8*. (For more
information, see the documentation for :data:`~minform.EXPLICIT`.)
form_field: A :class:`wtforms.fields.StringField` instance.
"""
form_field_class = wtforms.StringField
def __init__(self, label='', validators=None, max_length=None,
length=core.AUTOMATIC, order=None, **kwargs):
if not isinstance(max_length, int) or max_length < 0:
raise ValueError('BytesField must be created with a '
'positive max_length keyword argument.')
self.order = order
self.length = length
self.max_length = max_length
if self.length == core.FIXED:
self.initial_validators = [Length(max=max_length, min=max_length)]
self.pack_string = '{0}s'.format(max_length)
elif self.length == core.AUTOMATIC:
self.initial_validators = [Length(max=max_length)]
self.pack_string = '{0}s'.format(max_length)
elif self.length == core.EXPLICIT:
self.initial_validators = [Length(max=max_length)]
self.length_field = store_numbers_up_to(max_length, order=order)
self.pack_string = '{0}{1}s'.format(self.length_field.pack_string,
max_length)
super(BytesField, self).__init__(label, validators, order, **kwargs)
def pack_data(self, data, order):
buffer = bytearray(self.size)
length = len(data)
if self.length == core.EXPLICIT:
pack_length_string = order + self.length_field.pack_string
struct.pack_into(pack_length_string, buffer, 0, length)
start = self.length_field.size
else:
start = 0
buffer[start:start+length] = data
return buffer
def unpack_data(self, buffer, order):
if self.length == core.EXPLICIT:
unpack_length_string = order + self.length_field.pack_string
length = struct.unpack_from(unpack_length_string, buffer)[0]
if length > self.max_length:
message = "Buffer cannot contain {0} bytes.".format(length)
raise ValueError(message)
data_buffer = buffer[self.length_field.size:]
else:
length = self.max_length
data_buffer = buffer
data = data_buffer[:length]
if self.length == core.AUTOMATIC:
data = data.rstrip(b'\x00')
return data
def store_numbers_up_to(n, signed=False, **kwargs):
"""
Return a BinaryField class that can store numbers up to a certain maximum.
If the number is too big to store, a ``ValueError`` will be raised.
Parameters:
n: The highest number that you expect to need to store (must be at
most a 64-bit integer).
signed: Return a field that can store negative numbers.
kwargs: Additional arguments get passed into the binary field
constructor.
Returns:
BinaryIntegerField: A :class:`BinaryIntegerField` that can store
numbers up to at least ``n``.
"""
if signed:
if n <= Int8Field.max:
return Int8Field(**kwargs)
elif n <= Int16Field.max:
return Int16Field(**kwargs)
elif n <= Int32Field.max:
return Int32Field(**kwargs)
elif n <= Int64Field.max:
return Int64Field(**kwargs)
else:
raise ValueError("Can't track numbers up to {0}".format(n))
else:
if n <= UInt8Field.max:
return UInt8Field(**kwargs)
elif n <= UInt16Field.max:
return UInt16Field(**kwargs)
elif n <= UInt32Field.max:
return UInt32Field(**kwargs)
elif n <= UInt64Field.max:
return UInt64Field(**kwargs)
else:
raise ValueError("Can't track numbers up to {0}".format(n))
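# --- Hedged illustration (added; not part of the original module, run e.g. via
# "python -m <package>.<module>" so the relative import above resolves): the helper
# picks the smallest integer field wide enough for the given maximum, and BytesField
# with EXPLICIT length uses that field as a length prefix in front of the payload.
if __name__ == "__main__":
    assert isinstance(store_numbers_up_to(200), UInt8Field)        # fits in one byte
    assert isinstance(store_numbers_up_to(70000), UInt32Field)     # too big for UInt16
    assert isinstance(store_numbers_up_to(100, signed=True), Int8Field)
    field = BytesField(max_length=5, length=core.EXPLICIT)
    packed = field.pack(b'hi')
    assert bytes(packed) == b'\x02hi\x00\x00\x00'   # 1-byte length prefix plus padding
    assert field.unpack(packed) == b'hi'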
|
print('''
Error #404
Subscribe to my telegram channel @libernet_15''')
|
import os
import sys
from git import Repo, Actor
import time, datetime
from . import ccc
closeCode = 0
months = {
"[01]": "A",
"[02]": "B",
"[03]": "C",
"[04]": "D",
"[05]": "E",
"[06]": "F",
"[07]": "G",
"[08]": "H",
"[09]": "I",
"[10]": "J",
"[11]": "K",
"[12]": "L"
}
def close(shouldPush=True, err="", fail="", note="", success="", cyan=[], alert=""):
"""
print failure,
clean workspace
exit with stderr
"""
global closeCode
if err != "":
ccc.stderr(err)
closeCode = 1
if cyan != []:
ccc.cyan(cyan[0], cyan[1])
if note != "":
ccc.note(note)
if fail != "":
ccc.fail(fail)
closeCode = 1
if alert != "":
ccc.alert(alert)
if success != "":
ccc.success(success)
# if shouldPush:
# update_Xbooksrc_transform()
if "linux" in sys.platform:
try:
os.system("rm -r -f ./Xblog/")
ccc.success("cleaning temp workspace")
except Exception as err:
            ccc.fail("cleaning temp workspace")
closeCode = 1
else:
ccc.note("aborted cleaning temp workspace")
closeCode = 1
sys.exit(closeCode)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
def test_double_quoted_strings_are_strings(self):
#is double quoted properly
string = "Hello, world."
self.assertEqual(True, isinstance(string, str))
def test_single_quoted_strings_are_also_strings(self):
#single quotes works as a string
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, str))
def test_triple_quote_strings_are_also_strings(self):
#triple quotes are strings
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, str))
def test_triple_single_quotes_work_too(self):
#triple single quotes works as string
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, str))
def test_raw_strings_are_also_strings(self):
#raw strings count as strings
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, str))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
#single quotes on both ends and double quotations inside the single quotes counts as a string
string = 'He said, "Go Away."'
self.assertEqual('He said, "Go Away."', string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
#double quotes on both ends of string and single quote in the string works
string = "Don't"
self.assertEqual("Don't", string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
#backslash escapes the quote so the strings are equal
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
self.assertEqual(True, (a == b))
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
#backslash continues to next line, length of the string is 52 characters
string = "It was the best of times,\n\
It was the worst of times."
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
        #Triple quoted strings can be used to span multiple lines, length of string is 15
string = """
Howdy,
world!
"""
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
#triple quoted lines do not need to use backslash escaping as much, string a and b match
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
#used single quotes on outside and double quotes inside for a valid string
string = """Hello "world\""""
self.assertEqual('Hello "world"', string)
def test_plus_concatenates_strings(self):
#concatenating two strings
string = "Hello, " + "world"
self.assertEqual("Hello, world", string)
def test_adjacent_strings_are_concatenated_automatically(self):
#automatically concatenated
string = "Hello" ", " "world"
self.assertEqual("Hello, world", string)
def test_plus_will_not_modify_original_strings(self):
#Concatenates both variables as strings
hi = "Hello, "
there = "world"
string = hi + there
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
# += appends to end of the string
hi = "Hello, "
there = "world"
hi += there
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
# += does not modify the original string
original = "Hello, "
hi = original
there = "world"
hi += there
self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
#strings interpret escape characters, so length is equal to 1
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
self.assertEqual(1, len(string))
|
from .fileOnAES import *
from .fileOnXOR import *
from .fileOnBase64 import *
from .fileOnBase32 import *
from .fileOnBase16 import *
from .fileOnBlowfish import *
from .fileOnDES import *
from .fileOnDES3 import *
from .fileOnARC2 import *
from .fileOnARC4 import *
__all__ = ['fileOnAES','fileOnXOR','fileOnBase64','fileOnBase32','fileOnBase16','fileOnBlowfish','fileOnARC4','fileOnARC2','fileOnDES3','fileOnDES']
name = "fileCrypto" |
"""Basic Unit testing for example.py"""
import math
from random import randint
import pytest
from example import increment, COLORS
def test_increment():
"""Testing increment function"""
test_value = randint(0, 10)
assert increment(3) == 4
assert increment(-2) == -1
assert increment(2.4) == 3.4
for i in range(100):
assert increment(test_value) == test_value + 1
def test_num_colors():
"""testing colors contents"""
    assert len(COLORS) == 4
def test_color_contents():
assert 'blue' in COLORS
|
#!/usr/bin/env python3
__author__ = 'Bradley Frank'
import argparse
import json
import logging
import os
import shutil
import subprocess
import sys
import urllib.request
from urllib.error import HTTPError, URLError
PROJECTS = {
'Mailspring': ['Foundry376', 'Mailspring'],
'VSCodium': ['VSCodium', 'vscodium'],
}
REPO_ROOT_DIR = '/srv/repos'
REPO_COLO = 'colo'
REPOSITORIES = {
'CentOS': '7',
'Fedora': '29'
}
PACKAGES = {}
class rpm2repo:
def __init__(self, name, owner, repo, colo_dir, repolog):
self.releases_url = 'https://api.github.com/repos/' + \
owner + '/' + repo + '/releases/latest'
self.colo_dir = colo_dir
self.name = name
self.repolog = repolog
def get_latest_release(self):
# Download release feed from GitHub
try:
response = urllib.request.urlopen(self.releases_url)
except HTTPError as e:
print(self.name + ': could not download release information.')
self.repolog.log('error', e.code)
return False
except URLError as e:
print(self.name + ': could not download release information.')
self.repolog.log('error', e.reason)
return False
        self.data = response.read().decode('utf-8')
self.feed = json.loads(self.data)
# Check that feed actually has releases
if 'assets' not in self.feed:
print(self.name + ': could not find release information.')
return False
else:
self.repolog.log('info',
self.name + ': downloaded release information.')
# Search releases for RPM file
for asset in self.feed['assets']:
if asset['name'].endswith('.rpm'):
self.download_url = asset['browser_download_url']
self.rpm_name = asset['name']
self.repolog.log('info',
self.name + ': found latest release RPM.')
break
else:
print('RPM file not found.')
return False
# Append new version filename to repo directory
self.filename = os.path.join(self.colo_dir, self.rpm_name)
# Create repo directory if it doesn't exist
if not os.path.isdir(self.colo_dir):
os.makedirs(self.colo_dir, exist_ok=True)
# Skip if file already exists
if os.path.isfile(self.filename):
print(self.name + ': RPM is already at latest release.')
return False
# Download the actual RPM file
try:
response = urllib.request.urlopen(self.download_url)
except HTTPError as e:
print('Could not download release.')
self.repolog.log('error', e.code)
return False
except URLError as e:
print('Could not download release.')
self.repolog.log('error', e.reason)
return False
# Save the RPM file to disk
try:
with open(self.filename, 'wb') as f:
shutil.copyfileobj(response, f)
except IOError as e:
print(self.name + ': could not save ' + self.rpm_name + '.')
self.repolog.log('error', e)
return False
print(self.name + ': updated to latest release.')
return True
class reposyncer:
    def __init__(self, os_name, version, repolog):
        self.repo_name = os_name + ' ' + version
        self.os = os_name.lower()
self.version = version
self.repolog = repolog
def reposync(self):
# Build reposync command
self.conf = os.path.join('/etc/reposyncer.d/',
self.os + '_' + self.version)
self.repo = os.path.join('/srv/repos/', self.os, self.version)
reposync_command = [
'reposync',
            '-c', self.conf,
            '-p', self.repo,
'--gpgcheck',
'--delete',
'--downloadcomps',
'--download-metadata',
'--quiet',
]
# Run the reposync process
try:
subprocess.call(reposync_command,
stdout=open(os.devnull, 'wb'),
stderr=open(os.devnull, 'wb'))
except OSError as e:
print(self.repo_name + ': error syncing repository.')
self.repolog.log('error', e)
return False
print(self.repo_name + ': successfully synced repository.')
return True
class repocreator:
def __init__(self, name, repo_dir, repolog):
self.name = name
self.colo_dir = repo_dir
self.repolog = repolog
def createrepo(self):
try:
subprocess.call(['createrepo', self.colo_dir],
stdout=open(os.devnull, 'wb'),
stderr=open(os.devnull, 'wb'))
except OSError as e:
print(self.name + ': error creating repository.')
self.repolog.log('error', e)
return False
print(self.name + ': successfully created repository.')
return True
class myLogger:
def __init__(self, debug=False):
# Logging settings
self.logger = logging.getLogger('reposyncer')
        if not debug:
            log_level = logging.INFO
        else:
            log_level = logging.DEBUG
self.logger.setLevel(log_level)
# Logging formats
_log_format = '[%(asctime)s] [%(levelname)8s] %(message)s'
log_format = logging.Formatter(_log_format, '%H:%M:%S')
# Adds a console handler to the logger
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(log_level)
ch.setFormatter(log_format)
self.logger.addHandler(ch)
def log(self, lvl, msg):
level = logging.getLevelName(lvl.upper())
self.logger.log(level, msg)
if __name__ == '__main__':
def _rpm2repo():
# Handle individual RPM updates
colo_dir = os.path.join(REPO_ROOT_DIR, REPO_COLO)
for name, repo in PROJECTS.items():
PACKAGES[name] = rpm2repo(name, repo[0], repo[1], colo_dir, repolog)
PACKAGES[name].get_latest_release()
cr = repocreator('Colo', colo_dir, repolog)
cr.createrepo()
def _reposyncer():
# Sync all configured repositories
pass
def _repocreator():
# Run createrepo across all repositories
pass
# Set available arguments
parser = argparse.ArgumentParser(
description='Wrapper for reposync and createrepo.')
parser.add_argument('-d', '--debug', action='store_true',
help='enables debug messages')
args = parser.parse_args()
# Configure debugging
if args.debug:
repolog = myLogger(True)
else:
repolog = myLogger(False)
# Execute desired processes
_rpm2repo()
_reposyncer()
_repocreator()
|
import os, sys, shutil
sys.path.append('../')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from TrialsOfNeuralVocalRecon.data_processing.convenience_tools import timeStructured
#CDIR = os.path.dirname(os.path.realpath(__file__))
CDIR = r'C:\Users\hoss3301\work\TrialsOfNeuralVocalRecon'
EXPERIMENTS = os.path.join(CDIR, 'experiments')
#if not os.path.isdir(EXPERIMENTS): os.mkdir(EXPERIMENTS)
time_string = timeStructured()
plot_one_path = os.path.join(EXPERIMENTS, "plot_one-{}________".format(time_string))
if not os.path.isdir(plot_one_path): os.mkdir(plot_one_path)
list_experiments = [d for d in os.listdir(EXPERIMENTS) if 'experiment' in d]
list_experiments = [d for d in list_experiments if not '.zip' in d]
fusions = []
for exp in list_experiments:
exp_path = os.path.join(*[EXPERIMENTS, exp])
with open(exp_path + r'/1/run.json', 'r') as inf:
dict_string = inf.read().replace('false', 'False').replace('null', 'None').replace('true', 'True')
run_dict = eval(dict_string)
with open(exp_path + r'/1/config.json', 'r') as inf:
dict_string = inf.read().replace('false', 'False').replace('null', 'None').replace('true', 'True')
config_dict = eval(dict_string)
print()
print(config_dict['fusion_type'])
print(run_dict['result'])
fusions.append(config_dict['fusion_type'])
metrics = list(run_dict['result'].keys())
metrics.remove('loss')
fusions = sorted(np.unique(fusions).tolist())
print(metrics)
print(fusions)
data = np.zeros((len(metrics), len(fusions)))
for exp in list_experiments:
try:
exp_path = os.path.join(*[EXPERIMENTS, exp])
with open(exp_path + r'/1/run.json', 'r') as inf:
dict_string = inf.read().replace('false', 'False').replace('null', 'None').replace('true', 'True')
run_dict = eval(dict_string)
with open(exp_path + r'/1/config.json', 'r') as inf:
dict_string = inf.read().replace('false', 'False').replace('null', 'None').replace('true', 'True')
config_dict = eval(dict_string)
fusion = config_dict['fusion_type']
f_i = fusions.index(fusion)
for metric in metrics:
m_i = metrics.index(metric)
data[m_i, f_i] = run_dict['result'][metric]
except Exception as e:
print(e)
ld = len(metrics)
lm = len(fusions)
width = 1 / lm - .05
X = np.arange(ld)
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
for i in range(lm):
ax.bar(X + i * width, data.T[i], width=width)
ax.set_ylabel('accuracy')
plt.xticks(X + lm * width / 2, metrics)
fusions = [f.replace('_', '') for f in fusions]
ax.legend(labels=fusions)
plt.savefig(os.path.join(plot_one_path, 'plot_bars_accs.png'), bbox_inches="tight")
|
"""
bbo.py
Defines the Bbo (best bid/offer) class
"""
__version__ = '1.0'
__author__ = 'Hugo Chauvary'
__email__ = '[email protected]'
from module import logger
from pyspark.sql import SparkSession
from pyspark.sql.types import Row
from module.data import Data
from module.logger import *
from util.util import get_df_types
from util.util import log_item
class Bbo:
def __init__(self, spark: SparkSession) -> None:
"""
Class constructor
args:
- spark: spark session
attributes:
- spark: spark session
        - bids: collection of bids, spark df
- asks: collection of asks, spark df
- l1: collection of bbos, spark df
"""
self.spark = spark
self.bids = spark.createDataFrame([], get_df_types('bbo'))
self.asks = spark.createDataFrame([], get_df_types('bbo'))
self.l1 = spark.createDataFrame([], get_df_types('l1_data'))
@log_item
def update(self, record: Row):
"""
Update bbo
attributes:
- record: l3 formatted record
"""
logger = Logger().logger
logger.info(f'processing seq_num {record.seq_num}')
bids_count = self.bids.count()
asks_count = self.asks.count()
bids_first = self.bids.first()
asks_first = self.asks.first()
# case 1: new order/buy side
if record.add_side == 'BUY':
# create bid record
vals = [(
record.time,
record.add_price,
record.add_qty,
record.add_order_id,
record.seq_num
)]
bid = self.spark.createDataFrame(vals, get_df_types('bbo'))
# condition to create a bbo entry
# new highest buy price
if ((bids_count > 0 and record.add_price > bids_first.price) \
or bids_count == 0) \
and asks_count > 0:
vals = [(
record.time,
record.add_price,
asks_first.price,
record.add_qty,
asks_first.size,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
# self.bids.show()
# limit(2) because we do not need to store all bids
# improves performance
# TODO: test
self.bids = self.bids.limit(2).union(bid)
self.bids = self.bids.sort(self.bids.price.desc())
# self.bids.show()
# case 2: new order/sell side
if record.add_side == 'SELL':
# create ask record
vals = [(
record.time,
record.add_price,
record.add_qty,
record.add_order_id,
record.seq_num
)]
ask = self.spark.createDataFrame(vals, get_df_types('bbo'))
# condition to create a bbo entry
# new lowest sell price
if ((asks_count > 0 and record.add_price < asks_first.price) \
or asks_count == 0) \
and bids_count > 0:
vals = [(
record.time,
bids_first.price,
record.add_price,
bids_first.size,
record.add_qty,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
self.asks = self.asks.limit(2).union(ask)
self.asks = self.asks.sort(self.asks.price.asc())
# self.asks.show()
# case 3: deleted order/buy side
if record.delete_side == 'BUY':
order_id = bids_first.order_id
self.bids = self.bids.filter(self.bids.order_id != record.delete_order_id)
bids_first = self.bids.first()
# condition to create a bbo entry
# deleted highest buy price
if order_id == record.delete_order_id:
vals = [(
record.time,
bids_first.price,
asks_first.price,
bids_first.size,
asks_first.size,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
# case 4: deleted order/sell side
if record.delete_side == 'SELL':
order_id = asks_first.order_id
self.asks = self.asks.filter(self.asks.order_id != record.delete_order_id)
asks_first = self.asks.first()
# condition to create a bbo entry
# deleted lowest sell price
if order_id == record.delete_order_id:
vals = [(
record.time,
bids_first.price,
asks_first.price,
bids_first.size,
asks_first.size,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
# case 5: updated order/buy side
if record.update_side == 'BUY':
order_id = bids_first.order_id
# delete bid
self.bids = self.bids.filter(self.bids.order_id != record.update_order_id)
# condition to create a bbo entry
# updated highest buy price
# TODO: check if remains highest price
if order_id == record.update_order_id:
vals = [(
record.time,
record.add_price,
asks_first.price,
record.add_qty,
asks_first.size,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
# create updated bid
vals = [(
record.time,
record.update_price,
record.update_qty,
record.update_order_id,
record.seq_num
)]
bid = self.spark.createDataFrame(vals, get_df_types('bbo'))
# add bid and reorder
self.bids = self.bids.limit(2).union(bid)
self.bids = self.bids.sort(self.bids.price.desc())
# case 6: updated order/sell side
if record.update_side == 'SELL':
order_id = asks_first.order_id
# delete ask
self.asks = self.asks.filter(self.asks.order_id != record.update_order_id)
# condition to create a bbo entry
# updated lowest sell price
# TODO: check if remains lowest price
if order_id == record.update_order_id:
vals = [(
record.time,
bids_first.price,
record.add_price,
bids_first.size,
record.add_qty,
record.seq_num
)]
bbo = self.spark.createDataFrame(vals, get_df_types('l1_data'))
# bbo.show()
self.l1 = self.l1.union(bbo)
# create updated ask
# add to asks and reorder
vals = [(
record.time,
record.update_price,
record.update_qty,
record.update_order_id,
record.seq_num
)]
ask = self.spark.createDataFrame(vals, get_df_types('bbo'))
self.asks = self.asks.limit(2).union(ask)
            self.asks = self.asks.sort(self.asks.price.asc())
# self.l1.show()
return self.l1 |
from unittest import TestCase
from moceansdk import RequiredFieldException
from moceansdk.modules.command.mc_object.send_sms import SendSMS
class TestTgSendText(TestCase):
def testParams(self):
params = {
"action": "send-sms",
"from": {
"type": "phone_num",
"id": "123456789"
},
"to": {
"type": "phone_num",
"id": "987654321"
},
"content": {
"type": "text",
"text": "test text"
}
}
req = SendSMS()
req.set_from("123456789")
req.set_to("987654321")
req.set_content("test text")
self.assertEqual(params, req.get_request_data())
def test_if_action_auto_defined(self):
params = {
"action": "send-sms",
"from": {
"type": "phone_num",
"id": "123456789"
},
"to": {
"type": "phone_num",
"id": "987654321"
},
"content": {
"type": "text",
"text": "test text"
}
}
self.assertEqual('send-sms', SendSMS(
params).get_request_data()['action'])
self.assertEqual('text', SendSMS(
params).get_request_data()['content']['type'])
def test_if_required_field_not_set(self):
try:
SendSMS().get_request_data()
self.fail()
except RequiredFieldException:
pass
|
from userbot.utils import register
@register(outgoing=True, pattern="^.me$")
async def join(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        await e.edit("I AM OWNER OF EYE GANG\n\nIF YOU HAVE ANY CHANNEL, MAKE ME ADMIN SO I CAN POST PREMIUM ACCOUNTS\n\nMY RULES\n\n* NO SELLING\n* NO PROMOTION\n* AND MORE RULES")
|
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter
from plaster.tools.utils import utils, stats
from plaster.tools.schema import check
def radiometry_histogram_analyzer(sig):
"""
This is a bespoke histogram analyzer for radiometry to extract certain guesses.
Assumptions:
    * There will be a dominant peak with a mean near zero that is easily separated
      from the "one" peak, which is above 3 stdevs of the zero peak
* The one peak will be most dominant at the last cycle
* The zero peak will have a negative side that is essentially uncontaminated
by signal.
"""
# REMOVE 0s -- these are the result of failures
sig = sig.flatten()
sig = sig[sig != 0.0]
lft, rgt = np.percentile(sig, (0.1, 97.0))
# Use the negative values to estimate the sigma of the zero peak
zero_sigma = stats.half_nanstd(sig)
if zero_sigma == 0.0:
        # z.hist(sig)  # (plotting helper `z` is not available in this module)
        print("ERROR: Unable to determine beta on channel")
return 0.0, 1.0, 0.0, [0.0], [0.0], 0.0, 0.0, 0.0
# Go n-stds on the right side of the zero peak
zero_hist_right_side_thresh = 3.0 * zero_sigma
zero_bins = np.linspace(lft, zero_hist_right_side_thresh, 200)
zero_hist, zero_edges = np.histogram(
sig[sig < zero_hist_right_side_thresh], bins=zero_bins
)
zero_edges = zero_edges[1:]
top = np.max(zero_hist)
zero_mu = zero_edges[np.argmax(zero_hist)]
rgt = np.percentile(sig, 97)
one_bins = np.linspace(zero_hist_right_side_thresh, rgt, 200)
one_hist, one_edges = np.histogram(
sig[sig > zero_hist_right_side_thresh], bins=one_bins
)
one_edges = one_edges[1:]
# Smooth this with a savgol filter
one_filt = savgol_filter((one_edges, one_hist), window_length=27, polyorder=3)
top = np.max(one_hist)
beta = one_edges[np.argmax(one_filt[1])]
return zero_mu, zero_sigma, beta, one_edges, one_filt[1], lft, rgt, top
# def beta_per_channel(sig):
# check.array_t(sig, ndim=3)
# if report_params.get("beta_per_channel"):
# # This is a hack to allow manually setting for situations
# # where radiometry_histogram_analyzer() isn't doing well
# print("USING USER SPECIFIED BETA PER CHANNEL")
# beta = np.array(report_params.get("beta_per_channel"))
# else:
# beta = np.zeros((n_channels))
# for ch_i in range(n_channels):
# _, _, _beta, _, _, _, _, _ = radiometry_histogram_analyzer(sig[:, ch_i])
# beta[ch_i] = _beta
# return np.nan_to_num(beta)
def field_quality(ims_import, sigproc_v2, field_quality_thresh):
"""
Builds up a (field, channel) DataFrame with quality and alignment
Arguments:
ims_import: ImsImportResult
sigproc_v2: SigprocV2Result
field_quality_thresh: Need a way to auto-tune this
Returns:
field_df: (field_i, channel_i, alignment, mean_quality, good_field_alignment, good_field_quality)
field_align_thresh: float (derived from the image size)
"""
field_df = sigproc_v2.fields().copy()
n_fields = sigproc_v2.n_fields
assert n_fields == field_df.field_i.nunique()
index = ["field_i", "channel_i"]
    # ALIGNMENT: Max field alignment is 15% of the width or height of the import image
# It might be possible to increase this but as the alignment gets worse it begins
# to break the assumption that the aln coordinates can be used to look up
# spatial calibration information such as the regional PSF.
field_align_thresh = int(0.15 * ims_import.dim)
field_df["alignment"] = np.sqrt(field_df.aln_x ** 2 + field_df.aln_y ** 2)
field_df = field_df.set_index(index)
field_df = field_df.groupby(index).alignment.max().reset_index()
# MEAN QUALITY (each channel, all cycles)
qual_df = ims_import.qualities()
if len(qual_df) == 0:
# If there is no quality data from ims_import then create one with all NaNs
qual_df = field_df.copy()[index]
qual_df["quality"] = np.nan
qual_df = qual_df.groupby(index).mean()[["quality"]]
field_df = field_df.set_index(index).join(qual_df)
field_df["good_field_alignment"] = field_df.alignment < field_align_thresh
field_df["good_field_quality"] = field_df.quality > field_quality_thresh
if np.all(np.isnan(field_df.quality)):
field_df.good_field_quality = True
return field_df.reset_index(), field_align_thresh
def dark_thresh_per_channel(sig, dark_thresh_in_stds=4.0):
"""
Find the dark threshold (the intensity of transition from dark to light)
by computing a one-sided std on each channel
Arguments:
sig: ndarray(n_peaks, n_channels, n_cycles)
Returns:
dark_thresh_per_ch: ndarray(n_channels)
The estimated intensity threshold between off and on
"""
check.array_t(sig, ndim=3)
n_channels = sig.shape[1]
dark_thresh_per_ch = np.zeros((n_channels,))
for ch_i in range(n_channels):
zero_sigma_est = stats.half_nanstd(sig[:, ch_i].flatten())
dark_thresh_per_ch[ch_i] = dark_thresh_in_stds * zero_sigma_est
return dark_thresh_per_ch
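# Illustrative usage sketch (not part of the original module), based on the shapes
# documented above; `sig` is assumed to be ndarray(n_peaks, n_channels, n_cycles):
#   dark_thresh_per_ch = dark_thresh_per_channel(sig)          # one threshold per channel
#   is_dark = sig < dark_thresh_per_ch[None, :, None]          # boolean mask per measurement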
def features(ims_import, sigproc_v2, dark_thresh_in_stds):
"""
Extract a variety of features for every peak
Arguments:
ims_import: ImsImportResult
sigproc_v2: SigprocV2Result
Returns:
per_peak_df: Dataframe peak traits independent of channel (one row per peak)
ch_peak_df: Dataframe peak traits by channel (one row per peak per channel)
TO DO:
Consider parallelization
"""
from plaster.run.prep.prep_worker import triangle_dytmat
from scipy.spatial.distance import cdist
per_peak_df = sigproc_v2.peaks()
# Convenience aliases
n_channels = sigproc_v2.n_channels
n_cycles = sigproc_v2.n_cycles
n_peaks = per_peak_df.peak_i.max() + 1
assert len(per_peak_df) == n_peaks
im_mea = ims_import.dim
# Merge in stage metadata
if ims_import.has_metadata():
meta_df = ims_import.metadata()
column_names = ["field_i", "stage_x", "stage_y"]
if all([col in meta_df for col in column_names]):
stage_df = meta_df[column_names].groupby("field_i").mean()
per_peak_df = pd.merge(
left=per_peak_df, right=stage_df, left_on="field_i", right_on="field_i"
)
per_peak_df["flowcell_x"] = per_peak_df.stage_x + per_peak_df.aln_x
per_peak_df["flowcell_y"] = per_peak_df.stage_y + per_peak_df.aln_y
per_peak_df["radius"] = np.sqrt(
(per_peak_df.aln_x - im_mea // 2) ** 2 + (per_peak_df.aln_y - im_mea // 2) ** 2
)
sig = sigproc_v2.sig()
dark_thresh_per_ch = dark_thresh_per_channel(sig, dark_thresh_in_stds)
per_ch_dfs = []
for ch_i in range(n_channels):
dark_thresh = dark_thresh_per_ch[ch_i]
ch_sig = sig[:, ch_i, :]
ch_noi = sigproc_v2.noi()[:, ch_i, :]
# has_neighbor_stats = run.sigproc_v2.has_neighbor_stats()
# if has_neighbor_stats:
# try:
# nei = run.sigproc_v2.neighborhood_stats()
# assert nei.shape[0] == n_peaks
# # There's an issue here on some fields that have no neighbor info
# except TypeError:
# has_neighbor_stats = False
# "Lifespan" is the cycles over which a peak is "on". Abbreviated "lif"
# Use the cosine distance to determine lif_len. This is based on trying
# practically every distance metric in the cdist family and seeing that
# cosine tends to do the best job. There is likely a better theoretical
# understanding to be had for this. The main goal is to approximately
        # assign row lengths to noisy rows like:
        # [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
        # Ie, is that length 1 or length 7? 7 seems more reasonable and that
# would be the result of the cosine distance.
dyts1 = triangle_dytmat(n_cycles, n_dyes=1, include_nul_row=False)
dyt1_dists = cdist(ch_sig > dark_thresh, dyts1, "cosine")
# life length is the measured by the arg minimum cdist along each row
# But we need to add one because the triangle_dytmat does not include
# the nul row (all zeros) thus the dyts1[0] has length 1, not 0
lif_len = np.argmin(dyt1_dists, axis=1) + 1
# Extract signal during and after the lifetime ("afl" = "afterlife")
row_iz, col_iz = np.indices(ch_sig.shape)
sig_lif = np.where(col_iz < lif_len[:, None], ch_sig, np.nan)
sig_afl = np.where(col_iz >= lif_len[:, None], ch_sig, np.nan)
def stats(mat, prefix):
with utils.np_no_warn():
return pd.DataFrame(
{
f"{prefix}_med": np.nanmedian(mat, axis=1),
f"{prefix}_men": np.nanmean(mat, axis=1),
f"{prefix}_std": np.nanstd(mat, axis=1),
f"{prefix}_iqr": np.subtract(
*np.nanpercentile(mat, [75, 25], axis=1)
),
f"{prefix}_max": np.nanmax(mat, axis=1),
f"{prefix}_min": np.nanmin(mat, axis=1),
}
)
ch_peak_df = pd.DataFrame(
dict(
peak_i=per_peak_df.peak_i,
field_i=per_peak_df.field_i,
channel_i=ch_i,
lif_len=lif_len,
noi_cy0=ch_noi[:, 0],
dark_cy0=ch_sig[:, 0] <= dark_thresh,
)
)
ch_peak_df = pd.concat(
(ch_peak_df, stats(sig_lif, "lif"), stats(sig_afl, "afl"),), axis=1,
)
# Multi-channel can have zero-length lives in SOME channels but not others.
# This is because the peak finder only requires that ONE channel have
# non-zero signal. But the above calculations for lif_len
# will not handle this situation as it doesn't (and must not) include
# the "all zeros (aka nul) row".
# Thus, the lif_len will report length 1 when the true length is 0
# for those channels with no signal at cycle 0.
# Thus, we have to detect this situation by looking
# for rows with lif_len == 1 where cy[0] value is very low.
true_lif_0 = ch_peak_df[
(ch_peak_df.lif_len == 1) & (ch_peak_df.lif_men < dark_thresh)
]
ch_peak_df.loc[true_lif_0.peak_i, "lif_len"] = 0.0
        ch_peak_df.loc[
            true_lif_0.peak_i,
            ["lif_med", "lif_men", "lif_std", "lif_iqr", "lif_max", "lif_min"],
        ] = 0.0
per_ch_dfs += [ch_peak_df]
return per_peak_df, pd.concat(per_ch_dfs)
def _noise_thresh_one_channel(noi_cy0, noi_thresh_in_stds=2.5):
"""
Use a filter to smooth the histogram of a noise distribution
and return a threshold based on noi_thresh_in_stds to the right
of the peak.
"""
check.array_t(noi_cy0, ndim=1)
bins = np.linspace(0, np.percentile(noi_cy0, 99), 200)
# Smooth this with a savgol filter and use the main peak
    # to make a one-sided estimate of the std
_hist, _edges = np.histogram(noi_cy0, bins=bins)
_edges = _edges[1:]
_filt = savgol_filter((_edges, _hist), window_length=11, polyorder=3)
_filt = _filt[1]
center = _edges[np.argmax(_filt)]
std = stats.half_nanstd(noi_cy0, mean=center)
return center + noi_thresh_in_stds * std
def noise(ch_peak_df, noi_thresh_in_stds=2.5):
"""
Get the noise and thresholds for each channel.
Returns:
noi_cy0_per_ch: list(ndarray)
noi_thresh_per_ch: ndarray(n_channels)
"""
n_channels = ch_peak_df.channel_i.nunique()
noi_cy0_per_ch = [None] * n_channels
noi_thresh_per_ch = np.zeros((n_channels,))
for ch_i in range(n_channels):
noi_cy0_per_ch[ch_i] = ch_peak_df[
(ch_peak_df.channel_i == ch_i) & (ch_peak_df.dark_cy0 == 0)
].noi_cy0
noi_thresh_per_ch[ch_i] = _noise_thresh_one_channel(
noi_cy0_per_ch[ch_i].values, noi_thresh_in_stds=noi_thresh_in_stds
)
return noi_cy0_per_ch, noi_thresh_per_ch
def monotonic(bal_sig, beta, lif_len, monotonic_threshold=1.0):
"""
Examine a cycle-balanced radat (one channel) for the
maximum increase in intensity per row and normalize
by beta. This puts it roughly into units of dye-count.
Arguments:
bal_sig: ndarray(n_peaks, n_cycle). Cycle balanced
beta: float. approximate intensity per dye
lif_len: ndarray(n_peaks). lifespan of each row in cycles
        monotonic_threshold: float. In dye count units, max increase allowed
Returns:
monotonic_metric: ndarray((n_peaks)). Max increase in any cycle for each peak in dye counts
good_mask: ndarray((n_peaks), dtype=bool).
Where monotonic_metric > monotonic_threshold and life_len > 1 and first cycle is not dark
"""
check.array_t(bal_sig, ndim=2)
check.t(beta, float)
check.array_t(lif_len, ndim=1)
check.t(monotonic_threshold, float)
assert len(lif_len) == bal_sig.shape[0]
_, col_iz = np.indices(bal_sig.shape)
sig_lif = np.where(col_iz < lif_len[:, None], bal_sig, np.nan)
with utils.np_no_warn():
d = np.diff(sig_lif, append=0.0, axis=1)
maxs_diff = np.nanmax(d, axis=1)
monotonic_metric = maxs_diff / beta
monotonic_metric_exceeds_thresh_mask = monotonic_metric > monotonic_threshold
lif_gt_1_mask = lif_len > 1
starts_high_mask = bal_sig[:, 0] > 0.8 * beta
good_mask = ~(
monotonic_metric_exceeds_thresh_mask & lif_gt_1_mask & starts_high_mask
)
return monotonic_metric, good_mask
def _sig_in_range(sigproc_v2):
"""
Returns a mask indicating which signals are in range. "In range" means
able to be represented as np.float32 for the moment. The radmat is float64,
but some downstream ops (e.g. classify_rf) want to represent as float32.
Or we could just truncate these signals? But these signals really are
probably bad.
"""
finfo = np.finfo(np.float32)
max_allowed = finfo.max
min_allowed = finfo.min
radmat = utils.mat_flatter(sigproc_v2.sig())
peak_max = np.max(radmat, axis=1)
peak_min = np.min(radmat, axis=1)
in_range_mask = (peak_min > min_allowed) & (peak_max < max_allowed)
return in_range_mask
def build_filter_df(sigproc_v2, field_df, per_peak_df, ch_peak_df, noi_thresh):
_field_df = (
field_df.groupby("field_i")
.agg(dict(good_field_alignment=np.nanmin, good_field_quality=np.nanmin))
.reset_index()
)
df = per_peak_df.merge(
right=_field_df[["field_i", "good_field_alignment", "good_field_quality"]],
how="inner",
on="field_i",
)
# nanmax across the channels on each peak to get the highest noise at cycle 0
# Note, this is assuming that all channels have relatively similar
# noise profiles. If this is not true then we need a more complicated
# calculation where we look at the distance of the noise compared
# to something like a z-score. For now, I'm using a single threshold
# in common for all channels
max_noi0_over_all_channels = (
ch_peak_df.groupby("peak_i").agg(dict(noi_cy0=np.nanmax)).reset_index()
)
max_noi0_over_all_channels.rename(columns=dict(noi_cy0="max_noi0"), inplace=True)
df = df.set_index("peak_i").join(max_noi0_over_all_channels.set_index("peak_i"))
df["good_noi"] = df.max_noi0 < noi_thresh
# TODO: Monotonic?
# for ch_i in range(sigproc_v2.n_channels):
# sig = sigproc_v2.sig()[:, ch_i, :]
# _df = ch_peak_df[ch_peak_df.channel_i == ch_i]
# monotonic_metric, monotonic_good_mask = monotonic(
# sig, beta_per_channel[ch_i], _df.lif_len.values, monotonic_threshold=monotonic_threshold
# )
# df["good_monotonic_any_ch"] = ch_peak_df.groupby("peak_i").agg({"good_monotonic": [np.nanmax]}).values.flatten()
# df["good_monotonic_all_ch"] = ch_peak_df.groupby("peak_i").agg({"good_monotonic": [np.nanmin]}).values.flatten()
# TODO: SNR? (Consider using structure of sig vs noi)
# _snr = run.sigproc_v2.snr()[all_fea_df.peak_i, :, 0]
# all_fea_df["good_snr"] = np.all(_snr[:, :] > ch_valley, axis=1)
# all_fea_df.pass_all = all_fea_df.pass_all & all_fea_df.good_snr
# TODO: how best to handle out-of-range radmat values? For now
# just reject signal that is outside the boundaries of float32
df["sig_in_range"] = _sig_in_range(sigproc_v2)
df["pass_quality"] = (
df.good_field_alignment & df.good_field_quality & df.good_noi & df.sig_in_range
)
return df.reset_index()
def cycle_balance():
raise NotImplementedError
def radmat_filter_mask(rad_filter_result):
"""
Return a mask indicating which radmat rows pass the most recent application of
filtering as represented by the "pass_quality" column of RadFilterResult.filter_df.
"""
return (rad_filter_result.filter_df.pass_quality).astype(bool).values
|
import json
import requests
import os
import data
# Sends api request
def WarframeAPIRequest():
url = ("https://ws.warframestat.us/pc")
r = requests.get(url)
apidata = r.json()
#Directory of the locally saved worldstate json
basedir = os.path.abspath(os.path.dirname(__file__))
    data_json = os.path.join(basedir, 'world.json')
    #Open and replace the current worldstate with the newly acquired json
with open(data_json, 'w') as outfile:
json.dump(apidata, outfile)
#After data has been replaced open the worldstate json
with open(data_json, encoding="utf8") as f:
cacheddata = json.load(f)
#Return the content of the json for parsing
return cacheddata
#Cache the worldstate in a module-level variable to save time and API calls
WorldStateData = WarframeAPIRequest()
# Retrieves the sorties information
def RetrieveSorties():
#Gets the data from the WorldStateData variable created above and filters out the sortie part which we need.
Sorties = WorldStateData["sortie"]
return Sorties
# Retrieves alert information
def RetrieveAlerts():
Alerts = WorldStateData["alerts"]
return Alerts
# Retrieves fissure information
def RetrieveFissures():
fissures = WorldStateData["fissures"]
return fissures
# Retrieves the Cetus time cycle
def RetrieveCetusCycle():
TimeCycle = WorldStateData["cetusCycle"]
return TimeCycle
# Retrieves the Earth time cycle
def RetrieveEarthCycle():
TimeCycle = []
TimeCycle.append(WorldStateData["earthCycle"]["timeLeft"])
TimeCycle.append(WorldStateData["earthCycle"]["isDay"])
return TimeCycle
|
from base64 import b64encode
import base64
from typing import Optional
from hashlib import md5
from .utils import ChecksumError
try:
import crcmod
except ImportError:
crcmod = None
class ConsistencyChecker:
def __init__(self):
pass
def update(self, data: bytes):
pass
def validate_json_response(self, gcs_object):
pass
def validate_headers(self, headers):
pass
def validate_http_response(self, r):
pass
class MD5Checker(ConsistencyChecker):
def __init__(self):
self.md = md5()
def update(self, data):
self.md.update(data)
def validate_json_response(self, gcs_object):
mdback = gcs_object["md5Hash"]
if b64encode(self.md.digest()) != mdback.encode():
raise ChecksumError("MD5 checksum failed")
def validate_headers(self, headers):
if headers is not None and "X-Goog-Hash" in headers:
dig = [
bit.split("=")[1]
for bit in headers["X-Goog-Hash"].split(",")
if bit.split("=")[0] == "md5"
]
if dig:
if b64encode(self.md.digest()).decode().rstrip("=") != dig[0]:
raise ChecksumError("Checksum failure")
else:
raise NotImplementedError(
"No md5 checksum available to do consistency check. GCS does "
"not provide md5 sums for composite objects."
)
def validate_http_response(self, r):
return self.validate_headers(r.headers)
class SizeChecker(ConsistencyChecker):
def __init__(self):
self.size = 0
def update(self, data: bytes):
self.size += len(data)
def validate_json_response(self, gcs_object):
assert int(gcs_object["size"]) == self.size, "Size mismatch"
def validate_http_response(self, r):
assert r.content_length == self.size
class Crc32cChecker(ConsistencyChecker):
def __init__(self):
self.crc32c = crcmod.Crc(0x11EDC6F41, initCrc=0, xorOut=0xFFFFFFFF)
def update(self, data: bytes):
self.crc32c.update(data)
def validate_json_response(self, gcs_object):
# docs for gcs_object: https://cloud.google.com/storage/docs/json_api/v1/objects
digest = self.crc32c.digest()
digest_b64 = base64.b64encode(digest).decode()
expected = gcs_object["crc32c"]
if digest_b64 != expected:
raise ChecksumError(f'Expected "{expected}". Got "{digest_b64}"')
def validate_headers(self, headers):
if headers is not None:
hasher = headers.get("X-Goog-Hash", "")
crc = [h.split("=", 1)[1] for h in hasher.split(",") if "crc32c" in h]
if not crc:
raise NotImplementedError("No crc32c checksum was provided by google!")
if crc[0] != b64encode(self.crc32c.digest()).decode():
raise ChecksumError()
def validate_http_response(self, r):
return self.validate_headers(r.headers)
def get_consistency_checker(consistency: Optional[str]) -> ConsistencyChecker:
if consistency == "size":
return SizeChecker()
elif consistency == "md5":
return MD5Checker()
elif consistency == "crc32c":
if crcmod is None:
raise ImportError(
"The python package `crcmod` is required for `consistency='crc32c'`. "
"This can be installed with `pip install gcsfs[crc]`"
)
else:
return Crc32cChecker()
else:
return ConsistencyChecker()
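# Minimal usage sketch (illustrative, not part of the original module): feed data
# through a checker and validate it against object metadata built locally here.
if __name__ == "__main__":
    checker = get_consistency_checker("md5")
    checker.update(b"hello world")
    # fake GCS object metadata carrying the expected base64-encoded MD5 digest
    fake_gcs_object = {"md5Hash": b64encode(md5(b"hello world").digest()).decode()}
    checker.validate_json_response(fake_gcs_object)  # raises ChecksumError on mismatch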
|
from unittest import TestCase
from xrpl.models.requests import Fee
class TestRequest(TestCase):
def test_to_dict_includes_method_as_string(self):
tx = Fee()
value = tx.to_dict()["method"]
self.assertEqual(type(value), str)
|
from flask import request, json, Blueprint, g
from ..models.comment import CommentModel, CommentSchema
from .user_view import custom_response
from ..shared.authentication import Auth
comment_api = Blueprint('comment_api', __name__)
comment_schema = CommentSchema()
@comment_api.route('/', methods=['POST'])
@Auth.auth_required
def create_comment():
'''
creates a comment associated
with the owner's id, also
requires the doc's id
'''
req_data = request.get_json()
req_data['owner_id'] = g.user['id']
if not req_data.get('doc_id'):
return custom_response('missing doc id', 400)
data, error = comment_schema.load(req_data)
if error:
return custom_response(error, 404)
comment = CommentModel(data)
comment.save()
com_data = comment_schema.dump(comment).data
return custom_response(com_data, 201)
@comment_api.route('/doc/<int:doc_id>', methods=['GET'])
@Auth.auth_required
def get_all_comments_by_doc_id(doc_id):
comments = CommentModel.get_all_comments_by_doc_id(doc_id)
data = comment_schema.dump(comments, many=True).data
return custom_response(data, 200)
@comment_api.route('/<int:comment_id>', methods=['GET', 'DELETE', 'PUT'])
@Auth.auth_required
def comment_actions(comment_id):
'''
Allows for update, delete,
and get methods on a single
comment, update and delete
only allowed if user_id from g
matches comments owner_id
'''
    req_data = request.get_json()
    comment = CommentModel.get_comment_by_id(comment_id)
    if not comment:
        return custom_response({'error': 'comment not found'}, 404)
    data = comment_schema.dump(comment).data
if request.method == 'PUT':
if data.get('owner_id') != g.user.get('id'):
return custom_response({'error': 'permission denied'}, 400)
data, error = comment_schema.load(req_data, partial=True)
if error:
return custom_response(error, 400)
comment.update(data)
data = comment_schema.dump(comment).data
return custom_response(data, 200)
elif request.method == 'DELETE':
if data.get('owner_id') != g.user.get('id'):
return custom_response({'error': 'permission denied'}, 400)
comment.delete()
return custom_response({'message': 'comment deleted'}, 204)
# GET
return custom_response(data, 200)
|
# 7-bit codes for the digits 0-9; each digit travels as the decimal value of its code, zero-padded to 3 characters
binary = ["0111111","0001010","1011101","1001111","1101010","1100111","1110111","0001011","1111111","1101011"]
# decimal value of each 7-bit code
two2ten = [int(i, 2) for i in binary]
def f(S):
k = 0
for i in range(0,len(S),3):
t = int(S[i:i+3])
k *=10
k += two2ten.index(t)
return k
while True:
a=input()
if a == 'BYE':
break
a=a.replace('=','')
A,B=a.split("+")
S= f(A)+f(B)
print(a,end="=")
s=""
while(S>0):
s += "%03d" %(two2ten[S%10])
S//=10
for i in range(len(s)-1,-1,-3):
print(s[i-2]+s[i-1]+s[i],end="")
print() |
'''
Created on 2022-01-24
@author: wf
'''
import unittest
from tests.basetest import BaseTest
from osprojects.osproject import OsProject, Commit, Ticket, main, GitHub, gitlog2wiki
class TestOsProject(BaseTest):
'''
test the OsProject concepts
'''
def testOsProject(self):
'''
tests if the projects details, commits and issues/tickets are correctly queried
'''
osProject=self.getSampleById(OsProject,"id", "pyOpenSourceProjects")
tickets=osProject.getAllTickets()
expectedTicket=self.getSampleById(Ticket, "number", 2)
expectedTicket.project=osProject
self.assertDictEqual(expectedTicket.__dict__, tickets[-2].__dict__)
commit=Commit()
ticket=Ticket()
pass
def testGetCommits(self):
"""
tests extraction of commits for a repository
"""
if self.inPublicCI():
return
osProject=self.getSampleById(OsProject,"id", "pyOpenSourceProjects")
commits = osProject.getCommits()
expectedCommit = self.getSampleById(Commit, "hash", "106254f")
self.assertTrue(len(commits)>15)
self.assertDictEqual(expectedCommit.__dict__, commits[0].__dict__)
def testCmdLine(self):
"""
tests cmdline of osproject
"""
testParams=[
["-o", "WolfgangFahl", "-p", "pyOpenSourceProjects", "-ts", "github"],
["--repo"],
]
for params in testParams:
output=self.captureOutput(main, params)
self.assertTrue(len(output.split("\n"))>=2) # test number of Tickets
self.assertIn("{{Ticket", output)
def testGitlog2IssueCmdline(self):
"""
tests gitlog2issue
"""
if self.inPublicCI():
return
commit = self.getSampleById(Commit, "hash", "106254f")
expectedCommitMarkup = commit.toWikiMarkup()
output=self.captureOutput(gitlog2wiki)
outputLines=output.split("\n")
self.assertTrue(expectedCommitMarkup in outputLines)
class TestGitHub(BaseTest):
"""
tests GitHub class
"""
def testResolveProjectUrl(self):
"""
tests the resolving of the project url
"""
urlCases=[
{
"owner": "WolfgangFahl",
"project": "pyOpenSourceProjects",
"variants": [
"https://github.com/WolfgangFahl/pyOpenSourceProjects",
"http://github.com/WolfgangFahl/pyOpenSourceProjects",
"[email protected]:WolfgangFahl/pyOpenSourceProjects",
]
}, {
"owner": "ad-freiburg",
"project": "qlever",
"variants": [
"https://github.com/ad-freiburg/qlever"
]
}
]
for urlCase in urlCases:
urlVariants=urlCase["variants"]
expectedOwner=urlCase["owner"]
expectedProject=urlCase["project"]
for url in urlVariants:
giturl=f"{url}.git"
owner, project = GitHub.resolveProjectUrl(giturl)
self.assertEqual(expectedOwner, owner)
self.assertEqual(expectedProject, project)
class TestCommit(BaseTest):
"""
Tests Commit class
"""
def testToWikiMarkup(self):
"""
tests toWikiMarkup
"""
commit=self.getSampleById(Commit, "hash", "106254f")
expectedMarkup="{{commit|host=https://github.com/WolfgangFahl/pyOpenSourceProjects|path=|project=pyOpenSourceProjects|subject=Initial commit|name=GitHub|date=2022-01-24 07:02:55+01:00|hash=106254f|storemode=subobject|viewmode=line}}"
self.assertEqual(expectedMarkup, commit.toWikiMarkup())
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() |
from itertools import permutations
class Solution:
def nextGreaterElement(self, n: int) -> int:
"""
Personal attempt
Space : O(n)
Time : O(n!)
"""
if n < 10:
return -1
a = list(str(n))
ans = -1
perms = permutations(a)
for i in list(perms):
num = int("".join(i))
if num > n and num < 2 ** 31:
if ans == -1:
ans = num
else:
ans = min(ans, num)
return ans
def nextGreaterElementBest(self, n: int) -> int:
"""
Best Solution
Space : O(n)
Time : O(n)
"""
s = list(str(n))
l = len(s)
i = l-1
for i in range(i, -1, -1):
if s[i-1] < s[i]:
i -= 1
break
i -= 1
if i < 0:
return -1
for j in range(l-1, i, -1):
if s[j] > s[i]:
s[j], s[i] = s[i], s[j]
break
s[i+1:] = s[i+1:][::-1]
val = int(''.join(s))
return val if val < (2**31)-1 else -1
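# Quick sanity check (illustrative usage added here, not part of the original snippet)
if __name__ == "__main__":
    sol = Solution()
    assert sol.nextGreaterElementBest(12) == 21   # digits can be rearranged upward
    assert sol.nextGreaterElementBest(21) == -1   # already the largest permutation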
|
from context import CommandContext
from command import Command
from new import NewCommand
from check import CheckCommand
from gen_ref import GenRefCommand
from resolve import ResolveCommand
from list import ListCommand
from up import UpCommand
from down import DownCommand
from rebuild import RebuildCommand
from gen_sql import GenSqlCommand
from init import InitCommand
|
"""書籍関連のテスト"""
from django.test import TestCase
from django.core.urlresolvers import reverse
from .factories import BookFactory
class ListViewTests(TestCase):
"""書籍一覧ビューのテスト"""
def setUp(self):
"""前準備"""
self.title = 'テストタイトル'
self.books = BookFactory.create_batch(20, title=self.title)
def test_show_page(self):
"""書籍一覧を表示できる"""
r = self.client.get(reverse('books:list'))
self.assertContains(r, self.title, count=10, status_code=200)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : RealtimePlotter_test.py
# Author : Duy Anh Pham <[email protected]>
# Date : 23.03.2020
# Last Modified By: Duy Anh Pham <[email protected]>
import unittest
import matplotlib.pyplot as plt
import numpy as np
from RealtimePlotter import *
import time
# TODO:
# - TestCase for setting labels
# random dataplot dimension
while True:
row = np.random.randint(10)
col = np.random.randint(20, 50)
if row > 0 and col > 0:
break
# random data source
data_len = 100
data_interval = 0.005
plot_update_interval = 0.01
datasource = np.random.randint(
low=-10, high=10, size=(data_len, row))
class RealtimePlotterClassTest(unittest.TestCase):
def setUp(self):
self.data = np.random.randint(low=-10, high=10, size=(col, row))
self.dataplot = DataPlot(
row, col, option=DataplotOption.TIMESTAMP_NONE)
def tearDown(self):
# tear down code
pass
class PlottingTest(unittest.TestCase):
def setUp(self):
pass
# all plots should look the same
def test_Should_UpdateDataRealtimeWithCustomTimestamp_When_GettingDataFromSource(self):
# test code:
        # good use case when time ticks are saved in a 3rd-party database/cached memory.
# use plt.pause in separated process
# update data in database
dataplot = DataPlot(
row, col, option=DataplotOption.TIMESTAMP_CUSTOM)
        # simulating time ticks that are saved in DB/cached memory
self.timestamp_custom = []
ticks_start = 0
ticks_interval = 50
for i in range(1000): # 1000 values for time ticks axis (more than enough for now)
self.timestamp_custom.append(ticks_start + ticks_interval*i)
self.realtimeplotter = RealtimePlotter(dataplot)
fig, axes = plt.subplots()
plt.title('Plotting Data')
# plt.show()
self.realtimeplotter.config_plots(axes, ylim=[-11, 11])
for i in range(datasource.shape[0]):
self.realtimeplotter.dataplot.append(
datasource[i], self.timestamp_custom[i])
self.realtimeplotter.plot_data()
plt.pause(plot_update_interval)
print(dataplot)
input("Continue(press any key)?")
def test_Should_UpdateDataRealtimeWithTimestamp_When_GettingDataFromSource(self):
# test code:
        # not a good use case because the blocking process causes additional delays.
# use plt.pause in separated process
# update data in database
dataplot = DataPlot(
row, col, option=DataplotOption.TIMESTAMP_AUTO)
self.realtimeplotter = RealtimePlotter(dataplot)
fig, axes = plt.subplots()
plt.title('Plotting Data')
# plt.show()
self.realtimeplotter.config_plots(axes, ylim=[-11, 11])
for i in range(datasource.shape[0]):
self.realtimeplotter.dataplot.append(datasource[i])
# time ticks auto. generated base on delays interval
time.sleep(data_interval)
self.realtimeplotter.plot_data()
plt.pause(plot_update_interval)
input("Continue(press any key)?")
def test_Should_UpdateDataRealtimeWithoutTimestamp_When_GettingDataFromSource(self):
# test code:
        # not a good use case because the blocking process causes performance delays.
# use plt.pause in separated process
# update data in database
dataplot = DataPlot(
row, col, option=DataplotOption.TIMESTAMP_NONE)
self.realtimeplotter = RealtimePlotter(dataplot)
fig, axes = plt.subplots()
plt.title('Plotting Data')
# plt.show()
self.realtimeplotter.config_plots(
axes, y_labels=['a', 'b', 'c'], ylim=[-11, 11])
for i in range(datasource.shape[0]):
self.realtimeplotter.dataplot.append(datasource[i])
self.realtimeplotter.plot_data()
plt.pause(plot_update_interval)
input("Continue(press any key)?")
def tearDown(self):
# tear down code
pass
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for testing private queries.
Utility methods for testing private queries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def run_query(query, records, global_state=None, weights=None):
"""Executes query on the given set of records as a single sample.
Args:
query: A PrivateQuery to run.
records: An iterable containing records to pass to the query.
global_state: The current global state. If None, an initial global state is
generated.
weights: An optional iterable containing the weights of the records.
Returns:
A tuple (result, new_global_state) where "result" is the result of the
query and "new_global_state" is the updated global state.
"""
if not global_state:
global_state = query.initial_global_state()
params = query.derive_sample_params(global_state)
sample_state = query.initial_sample_state(next(iter(records)))
if weights is None:
for record in records:
sample_state = query.accumulate_record(params, sample_state, record)
else:
for weight, record in zip(weights, records):
sample_state = query.accumulate_record(params, sample_state, record,
weight)
result, global_state, _ = query.get_noised_result(sample_state, global_state)
return result, global_state
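# Illustrative usage sketch (not part of this module; assumes tensorflow_privacy's
# GaussianSumQuery is available and that `tensorflow` is imported as `tf`):
#   from tensorflow_privacy.privacy.dp_query import gaussian_query
#   query = gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=0.0)
#   result, _ = run_query(query, [tf.constant([0.5]), tf.constant([0.25])])
#   # with stddev=0.0 the "noised" result is just the clipped sum, [0.75]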
|
from django.urls import path
from . import views
app_name='app'
urlpatterns = [
path('',views.home,name='home'),
path('upfile_Ajax',views.upfile,name='upfile'),
path('runcmd_Ajax',views.run_cmd,name='run_cmd'),
path('cmd_msg',views.cmd_msg,name='cmd_msg'),
path('file_list',views.file_list,name='file_list'),
path('file_remove',views.file_remove,name='file_remove'),
path('files',views.files,name='files'),
]
|
import tensorflow as tf
import grpc
from tensorflow_serving_client.protos import prediction_service_pb2_grpc, predict_pb2
from tensorflow_serving_client.proto_util import copy_message
class TensorflowServingClient(object):
def __init__(self, host, port, cert=None):
self.host = host
self.port = port
        if cert is None:
            self.channel = grpc.insecure_channel('%s:%s' % (host, port))
else:
with open(cert,'rb') as f:
trusted_certs = f.read()
credentials = grpc.ssl_channel_credentials(root_certificates=trusted_certs)
self.channel = grpc.secure_channel('%s:%s' % (host, port), credentials, options=None)
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(self.channel)
def execute(self, request, timeout=10.0):
return self.stub.Predict(request, timeout)
def make_prediction(self, input_data, input_tensor_name, timeout=10.0, model_name=None):
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name or 'model'
copy_message(tf.contrib.util.make_tensor_proto(input_data, dtype='float32'), request.inputs[input_tensor_name])
response = self.execute(request, timeout=timeout)
results = {}
for key in response.outputs:
tensor_proto = response.outputs[key]
nd_array = tf.contrib.util.make_ndarray(tensor_proto)
results[key] = nd_array
return results
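# Illustrative usage sketch (host, port, tensor and model names below are
# placeholders, not part of the original module):
#   client = TensorflowServingClient('localhost', 8500)
#   outputs = client.make_prediction(image_batch, 'input_1', model_name='my_model')
#   # `outputs` maps each output tensor name to a numpy ndarray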
|
from .AbstractRepositoryFile import AbstractRepositoryFile
class GitFileInfo(AbstractRepositoryFile):
def __init__(self, workingCopy, status:str, filePath:str):
super().__init__(workingCopy, status, filePath)
#
#
|
import FWCore.ParameterSet.Config as cms
#from HLTrigger.HLTfilters.hltHighLevel_cfi import *
#exoticaMuHLT = hltHighLevel
#Define the HLT path to be used.
#exoticaMuHLT.HLTPaths =['HLT_L1MuOpen']
#exoticaMuHLT.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT8E29")
#Define the HLT quality cut
#exoticaHLTMuonFilter = cms.EDFilter("HLTSummaryFilter",
# summary = cms.InputTag("hltTriggerSummaryAOD","","HLT8E29"), # trigger summary
# member = cms.InputTag("hltL3MuonCandidates","","HLT8E29"), # filter or collection
# cut = cms.string("pt>0"), # cut on trigger object
# minN = cms.int32(0) # min. # of passing objects needed
# )
Jet2 = cms.EDFilter("EtaPtMinCandViewSelector",
src = cms.InputTag("iterativeCone5CaloJets"),
ptMin = cms.double(8),
etaMin = cms.double(-2),
etaMax = cms.double(2)
)
Jet1 = cms.EDFilter("EtaPtMinCandViewSelector",
src = cms.InputTag("Jet2"),
ptMin = cms.double(8),
etaMin = cms.double(-1),
etaMax = cms.double(1)
)
#Define the Reco quality cut
#jetFilter = cms.EDFilter("CaloJetSelector",
# src = cms.InputTag("iterativeCone5CaloJets"),
# cut = cms.string('pt > 100 && abs(eta) < 2.0' ),
# filter = cms.bool(True),
# minNumber = cms.uint32(2)
# sizeSelector = cms.uint32(2)
# )
dijetFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("Jet2"),
minNumber = cms.uint32(2)
)
jetFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("Jet1"),
minNumber = cms.uint32(1)
)
#===== add electrons =======
jetSuperClusterMerger = cms.EDFilter("EgammaSuperClusterMerger",
src = cms.VInputTag(cms.InputTag('correctedHybridSuperClusters'),
cms.InputTag('correctedMulti5x5SuperClustersWithPreshower'))
)
jetSuperClusterCands = cms.EDProducer("ConcreteEcalCandidateProducer",
src = cms.InputTag("jetSuperClusterMerger"),
particleType = cms.string('e-')
)
goodJetSuperClusters = cms.EDFilter("CandViewRefSelector",
src = cms.InputTag("jetSuperClusterCands"),
cut = cms.string('et > 3.0')
)
jetSuperClusterPt5Filter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("goodJetSuperClusters"),
minNumber = cms.uint32(2)
)
twoEmClusters = cms.Sequence(
jetSuperClusterMerger+jetSuperClusterCands+goodJetSuperClusters+jetSuperClusterPt5Filter
)
#Define group sequence, using HLT/Reco quality cut.
#exoticaMuHLTQualitySeq = cms.Sequence()
jetRecoQualitySeq = cms.Sequence(
# twoEmClusters +
Jet2+Jet1+dijetFilter+jetFilter
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CLI to log docker stats to a CSV file."""
import argparse
import datetime
import os
import re
import subprocess
import time
HEADER = "name,cpu_percent,mem,mem_percent,netio,blockio,pids,datetime"
REGEX_SIZE = re.compile(r"(\d+(?:\.\d+)?)([a-zA-Z]+)")
CONVERT_MAP = {
    "b": 1 / 1000000,
    "kib": 1 / 1000,
    "kb": 1 / 1000,
    "mib": 1,
    "mb": 1,
    "gib": 1000,
    "gb": 1000,
}
LABEL_MAP = {
"cpu_percent": "CPU (%)",
"mem": "Memory (Mb)",
"mem_percent": "Memory (%)",
"netio": "Network I/O (Mb)",
"blockio": "Block I/O (Mb)",
"pids": "Subprocesses",
}
def get_stats(quiet=False, handle=None):
current_time = datetime.datetime.now()
data = subprocess.check_output(
[
"docker",
"stats",
"--no-stream",
"--format",
"{{.Name}},{{.CPUPerc}},{{.MemUsage}},{{.MemPerc}},{{.NetIO}},{{.BlockIO}},{{.PIDs}}",
],
encoding="utf8",
)
lines = [f"{line.rstrip()},{current_time}" for line in data.rstrip().splitlines()]
text = "\n".join(lines)
if not quiet:
print(text)
if handle:
handle.write(text + "\n")
handle.flush()
def cli():
parser = argparse.ArgumentParser(description="Record docker stats to CSV.")
parser.add_argument(
"-o", "--output", metavar="PATH", type=str, default="", help="Output to file"
)
parser.add_argument(
"-i", "--interval", type=float, default=1, help="Polling interval (seconds)"
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="Do not print output"
)
parser.add_argument(
"-p",
"--plot",
metavar="COLUMN",
type=str,
help="Plot a column from an existing output CSV.",
)
parser.add_argument(
"-e",
"--plot-ext",
metavar="EXT",
type=str,
default="pdf",
help="Extension of output plot.",
)
args = parser.parse_args()
if args.plot:
plot_column(
args.output,
args.plot,
os.path.splitext(args.output)[0] + f"-{args.plot}.{args.plot_ext}",
)
return
if args.output and os.path.exists(args.output):
raise IOError("output path exists, delete first")
if not args.quiet:
print(HEADER)
if args.output:
with open(args.output, "w", encoding="utf8") as handle:
handle.write(HEADER + "\n")
while True:
get_stats(quiet=args.quiet, handle=handle)
time.sleep(args.interval)
else:
while True:
get_stats(quiet=args.quiet)
time.sleep(args.interval)
def convert_size(text: str) -> float:
"""Convert memory size to megabytes float."""
match = REGEX_SIZE.match(text)
assert match, f"did not match regex: {text}"
return float(match[1]) * CONVERT_MAP[match[2].lower()]
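# Illustrative examples (assuming the factors above convert each unit to megabytes):
#   convert_size("512MiB") -> 512.0
#   convert_size("1.5GiB") -> 1500.0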
def to_df(path: str):
"""Convert a saved CSV to a pandas DataFrame (memory in megabytes)."""
import pandas as pd
# read
df = pd.read_csv(path, index_col=False)
# format columns
df.mem_percent = df.mem_percent.str.rstrip("%").astype("float")
df.cpu_percent = df.cpu_percent.str.rstrip("%").astype("float")
df.mem = df.mem.apply(lambda x: convert_size(x.split("/")[0]))
df.netio = df.netio.apply(lambda x: convert_size(x.split("/")[0]))
df.blockio = df.blockio.apply(lambda x: convert_size(x.split("/")[0]))
df.pids = df.pids.astype("int")
df.datetime = pd.to_datetime(df.datetime)
return df
def plot_column(path: str, column: str, outpath: str = ""):
"""Plot a single column and save to file."""
df = to_df(path)
col_df = df.set_index(["name", "datetime"])[column].unstack("name")
ax = col_df.plot(grid=True)
ax.set_xlabel("Time")
ax.set_ylabel(LABEL_MAP[column])
if outpath:
ax.get_figure().savefig(outpath, bbox_inches="tight")
return ax
if __name__ == "__main__":
cli()
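# Example invocations (the script name is an assumption):
#   python docker_stats.py -o stats.csv -i 2      # record every 2 seconds
#   python docker_stats.py -o stats.csv -p mem    # plot the "mem" column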
|
"""
The :mod:`expert.utils.conv` module holds util functions for convolutions, like
padding to maintain the size of the image.
"""
# Author: Alex Hepburn <[email protected]>
# License: new BSD
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['pad']
def pad(im_size, filt_size, stride):
"""
Returns the amount of padding needed on [height, width] to maintain image
size.
This function calculates the amount of padding needed to keep the output
image shape the same as the input image shape.
Parameters
----------
im_size : List[int]
List of [height, width] of the image to pad.
filt_size : int
The width of the filter being used in the convolution, assumed to be
square.
stride : int
Amount of stride in the convolution.
Returns
-------
padding : List[int]
        Amount of padding needed for [height, width], repeated twice so the
        result can be passed to :func:`torch.nn.functional.pad`.
"""
padding = [int(((stride-1)*i-stride+filt_size)/2) for i in im_size]
    # Duplicate the [height, width] padding for use in :func:`torch.nn.functional.pad`.
return padding*2
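# Example (sketch): for a square 3x3 filter with stride 1, pad([32, 32], 3, 1)
# returns [1, 1, 1, 1], i.e. one pixel of padding for each spatial dimension.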
|
from typing import Any, List, Union
import pytest
from .parser import AST, ParseError, parse
StrAST = List[Union[str, List[Any]]]
def to_string(ast: AST) -> StrAST:
str_ast: StrAST = []
for node in ast:
if isinstance(node, list):
str_ast.append(to_string(node))
else:
            str_ast.append(str(node))
return str_ast
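# to_string flattens the AST into nested lists of "token_type : value" strings
# (via str(node)), which keeps the expected structures in the assertions below
# easy to read.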
def test_new() -> None:
assert to_string(parse("(@new (hello world))")) == [
"@new : @new",
["word : hello", "word : world"]
]
def test_new_advanced_assertion() -> None:
assert to_string(parse("""
(@new
(address
(Pichugin Vladislav)
(Moscow Bauman9 322)
)
)
""")) == [
"@new : @new",
[
"word : address",
["word : Pichugin", "word : Vladislav"],
["word : Moscow", "word : Bauman9", "number : 322"]
]
]
def test_new_rule() -> None:
assert to_string(parse("""
(@new
(@rule
(livesAbout $person1 $person2)
(@and (address $person1 ($town . $rest1))
(address $person2 ($town . $rest2))
(@not (same $person1 $person2)))
)
)
""")) == [
"@new : @new",
[
"@rule : @rule",
["word : livesAbout", "var : $person1", "var : $person2"],
[
"@and : @and",
[
"word : address",
"var : $person1",
["var : $town", ". : .", "var : $rest1"]
],
[
"word : address",
"var : $person2",
["var : $town", ". : .", "var : $rest2"]
],
[
"@not : @not",
["word : same", "var : $person1", "var : $person2"]
]
]
]
]
def test_new_empty_assertion() -> None:
assert to_string(parse("(@new ())")) == ["@new : @new", []]
def test_new_rule_with_empty_body() -> None:
assert to_string(parse("(@new (@rule (append () $y $y)))")) == [
"@new : @new",
[
"@rule : @rule",
[
"word : append",
[],
"var : $y",
"var : $y"
]
]
]
def test_empty_query() -> None:
assert to_string(parse("()")) == []
def test_simple_query() -> None:
assert to_string(parse("(position $x (programmer $type))")) == [
"word : position",
"var : $x",
[
"word : programmer",
"var : $type"
]
]
def test_simple_query_with_dot() -> None:
assert to_string(parse("(position $x (programmer . $type))")) == [
"word : position",
"var : $x",
[
"word : programmer",
". : .",
"var : $type"
]
]
def test_simple_query_advanced() -> None:
assert to_string(parse("(test 999 (() $v1 666 aaa . $rest) $var)")) == [
"word : test",
"number : 999",
[
[],
"var : $v1",
"number : 666",
"word : aaa",
". : .",
"var : $rest"
],
"var : $var"
]
def test_query_with_apply() -> None:
assert to_string(parse("""
(@or
(salary $person $amount)
(@apply > $amount 3000)
(@apply < $amount 10)
)
""")) == [
"@or : @or",
["word : salary", "var : $person", "var : $amount"],
["@apply : @apply", "> : >", "var : $amount", "number : 3000"],
["@apply : @apply", "< : <", "var : $amount", "number : 10"],
]
def test_parse_error() -> None:
with pytest.raises(ParseError):
parse("(@new (@rule (same $x $x))")
def test_batch_new() -> None:
assert to_string(parse("""
(@new
(@rule (same $x $x))
(position Vlad (junior developer))
)
""")) == [
"@new : @new",
["@rule : @rule", ["word : same", "var : $x", "var : $x"]],
["word : position", "word : Vlad", ["word : junior", "word : developer"]]
]
|
# -*- coding: utf-8 -*-
#
# This file is part of Radicale Server - Calendar Server
# Copyright © 2011-2013 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
"""
Radicale executable module.
This module can be executed from a command line with ``python -m radicale`` or
from a python programme with ``radicale.__main__.run()``.
"""
import atexit
import os
import sys
import optparse
import signal
import threading
import traceback
from wsgiref.simple_server import make_server
from . import (
Application, config, HTTPServer, HTTPSServer, log, RequestHandler, VERSION)
# This is a script, many branches and variables
# pylint: disable=R0912,R0914
def export_storage(config, path, debug=False):
"""Export the storage for Radicale 2.0.0."""
import json
import shutil
import tempfile
from . import ical, pathutils, storage
storage.load()
print("INFO: Exporting storage for Radicale 2.0.0 to %r" % path)
temp = tempfile.mkdtemp(prefix="Radicale.export.")
try:
os.mkdir(os.path.join(temp, "root"))
try:
remaining_collections = list(
ical.Collection.from_path("/", depth="0"))
except Exception as e:
print("ERROR: Failed to find collection %r: %s" % ("/", e))
if debug:
traceback.print_exc()
exit(1)
while remaining_collections:
collection = remaining_collections.pop(0)
if debug:
print("DEBUG: Exporting collection %r" %
("/" + collection.path))
try:
try:
filesystem_path = pathutils.path_to_filesystem(
collection.path,
os.path.join(temp, "root", "collection-root"))
except ValueError as e:
print(
"WARNING: Skipping unsafe collection %r: %s" %
("/" + collection.path, e))
if debug:
traceback.print_exc()
continue
try:
remaining_collections.extend(collection.children(
collection.path))
except Exception as e:
print("ERROR: Failed to find child collections of %r: %s" %
("/" + collection.path, e))
if debug:
traceback.print_exc()
exit(1)
os.makedirs(filesystem_path)
with collection.props as props:
if props:
props_filename = os.path.join(
filesystem_path, ".Radicale.props")
with open(props_filename, "w") as f:
json.dump(props, f)
for component in collection.components:
if debug:
print("DEBUG: Exporting component %r of collection %r"
% (component.name, "/" + collection.path))
try:
if not pathutils.is_safe_filesystem_path_component(
component.name):
print("WARNING: Skipping unsafe item %r from "
"collection %r" %
(component.name, "/" + collection.path))
continue
items = [component]
if collection.resource_type == "calendar":
items.extend(collection.timezones)
text = ical.serialize(
collection.tag, collection.headers, items)
component_filename = os.path.join(
filesystem_path, component.name)
with open(component_filename, "wb") as f:
f.write(text.encode("utf-8"))
except Exception as e:
print("ERROR: Failed to export component %r from "
"collection %r: %s" %
(component.name, "/" + collection.path, e))
if debug:
traceback.print_exc()
exit(1)
except Exception as e:
print("ERROR: Failed to export collection %r: %s" %
("/" + collection.path, e))
if debug:
traceback.print_exc()
exit(1)
try:
# This check is prone to a race condition
if os.path.exists(path):
raise Exception("Destination path %r already exists" % path)
shutil.move(os.path.join(temp, "root"), path)
except Exception as e:
print("ERROR: Can't create %r directory: %s" % (path, e))
if debug:
traceback.print_exc()
exit(1)
finally:
shutil.rmtree(temp)
def run():
"""Run Radicale as a standalone server."""
# Get command-line options
parser = optparse.OptionParser(version=VERSION)
parser.add_option(
"-d", "--daemon", action="store_true",
help="launch as daemon")
parser.add_option(
"-p", "--pid",
help="set PID filename for daemon mode")
parser.add_option(
"-f", "--foreground", action="store_false", dest="daemon",
help="launch in foreground (opposite of --daemon)")
parser.add_option(
"-H", "--hosts",
help="set server hostnames and ports")
parser.add_option(
"-s", "--ssl", action="store_true",
help="use SSL connection")
parser.add_option(
"-S", "--no-ssl", action="store_false", dest="ssl",
help="do not use SSL connection (opposite of --ssl)")
parser.add_option(
"-k", "--key",
help="set private key file")
parser.add_option(
"-c", "--certificate",
help="set certificate file")
parser.add_option(
"-D", "--debug", action="store_true",
help="print debug information")
parser.add_option(
"-C", "--config",
help="use a specific configuration file")
parser.add_option(
"--export-storage",
help=("export the storage for Radicale 2.0.0 to the specified "
"folder and exit"), metavar="FOLDER")
options = parser.parse_args()[0]
# Read in the configuration specified by the command line (if specified)
configuration_found = (
config.read(options.config) if options.config else True)
# Update Radicale configuration according to options
for option in parser.option_list:
key = option.dest
if key and key != "export_storage":
section = "logging" if key == "debug" else "server"
value = getattr(options, key)
if value is not None:
config.set(section, key, str(value))
if options.export_storage is not None:
config.set("logging", "config", "")
config.set("logging", "debug", "True" if options.debug else "False")
log.start()
if not configuration_found:
print("WARNING: Configuration file '%s' not found" %
options.config)
export_storage(config, options.export_storage, debug=options.debug)
exit(0)
# Start logging
log.start()
# Log a warning if the configuration file of the command line is not found
if not configuration_found:
log.LOGGER.warning(
"Configuration file '%s' not found" % options.config)
# Fork if Radicale is launched as daemon
if config.getboolean("server", "daemon"):
# Check and create PID file in a race-free manner
if config.get("server", "pid"):
try:
pid_fd = os.open(
config.get("server", "pid"),
os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            except OSError:
raise OSError(
"PID file exists: %s" % config.get("server", "pid"))
pid = os.fork()
if pid:
sys.exit()
# Write PID
if config.get("server", "pid"):
with os.fdopen(pid_fd, "w") as pid_file:
pid_file.write(str(os.getpid()))
# Decouple environment
os.umask(0)
os.chdir("/")
os.setsid()
with open(os.devnull, "r") as null_in:
os.dup2(null_in.fileno(), sys.stdin.fileno())
with open(os.devnull, "w") as null_out:
os.dup2(null_out.fileno(), sys.stdout.fileno())
os.dup2(null_out.fileno(), sys.stderr.fileno())
# Register exit function
def cleanup():
"""Remove the PID files."""
log.LOGGER.debug("Cleaning up")
# Remove PID file
if (config.get("server", "pid") and
config.getboolean("server", "daemon")):
os.unlink(config.get("server", "pid"))
atexit.register(cleanup)
log.LOGGER.info("Starting Radicale")
# Create collection servers
servers = []
server_class = (
HTTPSServer if config.getboolean("server", "ssl") else HTTPServer)
shutdown_program = threading.Event()
for host in config.get("server", "hosts").split(","):
address, port = host.strip().rsplit(":", 1)
address, port = address.strip("[] "), int(port)
servers.append(
make_server(address, port, Application(),
server_class, RequestHandler))
# SIGTERM and SIGINT (aka KeyboardInterrupt) should just mark this for
# shutdown
signal.signal(signal.SIGTERM, lambda *_: shutdown_program.set())
signal.signal(signal.SIGINT, lambda *_: shutdown_program.set())
def serve_forever(server):
"""Serve a server forever, cleanly shutdown when things go wrong."""
try:
server.serve_forever()
finally:
shutdown_program.set()
log.LOGGER.debug(
"Base URL prefix: %s" % config.get("server", "base_prefix"))
# Start the servers in a different loop to avoid possible race-conditions,
# when a server exists but another server is added to the list at the same
# time
for server in servers:
log.LOGGER.debug(
"Listening to %s port %s" % (
server.server_name, server.server_port))
if config.getboolean("server", "ssl"):
log.LOGGER.debug("Using SSL")
threading.Thread(target=serve_forever, args=(server,)).start()
log.LOGGER.debug("Radicale server ready")
# Main loop: wait until all servers are exited
try:
        # We must do the busy-waiting here, as all ``.join()`` calls completely
# block the thread, such that signals are not received
while True:
# The number is irrelevant, it only needs to be greater than 0.05
# due to python implementing its own busy-waiting logic
shutdown_program.wait(5.0)
if shutdown_program.is_set():
break
finally:
# Ignore signals, so that they cannot interfere
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
log.LOGGER.info("Stopping Radicale")
for server in servers:
log.LOGGER.debug(
"Closing server listening to %s port %s" % (
server.server_name, server.server_port))
server.shutdown()
# pylint: enable=R0912,R0914
if __name__ == "__main__":
run()
|
from detecting.models.backbones import VGG16,resnet_v1_50,resnet_v1_101,resnet_v1_152
from detecting.utils import model_util
from tensorflow.keras.models import Model
# Select a backbone according to the config
def get_backbone(cfg):
    if cfg.MODEL.BACKBONE=='vgg16':
        return backbone_vgg16(cfg)
    elif cfg.MODEL.BACKBONE=='resnet50':
        return backbone_resnet50(cfg)
    elif cfg.MODEL.BACKBONE=='resnet101':
        return backbone_resnet101(cfg)
    elif cfg.MODEL.BACKBONE=='resnet152':
        return backbone_resnet152(cfg)
    else:
        raise ValueError('Unsupported MODEL.BACKBONE: {}'.format(cfg.MODEL.BACKBONE))
def backbone_vgg16(cfg):
vgg16 = VGG16(include_top=True, weights='imagenet')
    # Drop the final pooling layer: stop at block5_conv3
backbone = Model(inputs=vgg16.input,outputs=vgg16.get_layer('block5_conv3').output,name='backbone')
    # Freeze the layers before conv3_1
for layer in backbone.layers[:7]:
layer.trainable = False
    # Reuse the fully-connected classification head of VGG16
head_to_tail = model_util.extract_submodel(
model=vgg16,
inputs=vgg16.get_layer('block5_pool').output,
outputs=vgg16.get_layer('fc2').output,
name='head_to_tail')
return backbone, head_to_tail
def backbone_resnet50(cfg):
resnet50 = resnet_v1_50(batchnorm_training=cfg.SOLVER.BN_TRAIN,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
classes=None,
weights=None,
include_top=False)
backbone = Model(inputs=resnet50.input,outputs=resnet50.get_layer('conv4_block6_out').output,name='backbone')
head_to_tail = model_util.extract_submodel(
model=resnet50,
inputs=resnet50.get_layer('conv4_block6_out').output,
outputs=resnet50.get_layer('conv5_block3_out').output,
name='head_to_tail')
return backbone, head_to_tail
def backbone_resnet101(cfg):
resnet101 = resnet_v1_101(batchnorm_training=cfg.SOLVER.BN_TRAIN,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
classes=None,
weights=None,
include_top=False)
backbone = Model(inputs=resnet101.input,outputs=resnet101.get_layer('conv4_block23_out').output,name='backbone')
head_to_tail = model_util.extract_submodel(
model=resnet101,
inputs=resnet101.get_layer('conv4_block23_out').output,
outputs=resnet101.get_layer('conv5_block3_out').output,
name='head_to_tail')
return backbone, head_to_tail
def backbone_resnet152(cfg):
resnet152 = resnet_v1_152(batchnorm_training=cfg.SOLVER.BN_TRAIN,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
classes=None,
weights=None,
include_top=False)
backbone = Model(inputs=resnet152.input,outputs=resnet152.get_layer('conv4_block36_out').output,name='backbone')
head_to_tail = model_util.extract_submodel(
model=resnet152,
inputs=resnet152.get_layer('conv4_block36_out').output,
outputs=resnet152.get_layer('conv5_block3_out').output,
name='head_to_tail')
return backbone, head_to_tail
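# Usage sketch (assumes a config object exposing MODEL.BACKBONE and SOLVER.*):
#   backbone, head_to_tail = get_backbone(cfg)
#   feature_map = backbone(images)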
|
#!/usr/bin/python3
from config import *
import discord
from discord.ext import commands
import json
import os
bot = commands.Bot(command_prefix=prefix)
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
try:
bot.run(token)
except Exception as e:
print(f'Error when logging in: {e}') |
import json
from sys import exit
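# Merge users.csv (surname, first name, patronymic per line) with hobby.csv
# into one dict keyed by the Russian column names in my_list, then round-trip
# it through JSON as a sanity check.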
names_2 = []
names_3 = []
hobs = []
my_list = ['Имена', 'Фамилии', 'Отчества', 'Увлечения']
list_list = [[],[],[]]
with open('users.csv', 'r', encoding='utf-8') as f_1:
with open('hobby.csv', 'r', encoding='utf-8') as f_2:
if sum(1 for i in f_2) > sum(1 for j in f_1):
exit(1)
f_1.seek(0)
f_2.seek(0)
for line in f_1:
name = line.split(',')
list_list[0].append(name[1])
list_list[1].append(name[0])
list_list[2].append(name[2].strip())
for line in f_2:
hob = map(lambda s: s.strip(), line.split(','))
hobs.extend(hob)
list_list.append(hobs)
list_list = map(tuple, list_list)
res_dict = {k: v for k, v in zip(my_list, list_list)}
with open('dict_file_4.txt', 'w', encoding='utf-8') as f_3:
json.dump(res_dict, f_3)
with open('dict_file_4.txt', 'r', encoding='utf-8') as f_3:
dict_test = json.load(f_3)
print(dict_test)
|
from argo.core.hooks.EveryNEpochsTFModelHook import EveryNEpochsTFModelHook
# get_samples_from_dataset
from datasets.Dataset import check_dataset_keys_not_loop, VALIDATION,TEST
from argo.core.argoLogging import get_logger
from matplotlib import pyplot as plt
import tensorflow as tf
import numpy as np
# from matplotlib.patches import Ellipse
from scipy.stats import chi2
import pygtc
import traceback
#import MCDcalibrationHook
#from ..hooks.MCDcalibrationHook import MCDcalibrationHook
#calibrated_number = MCDcalibrationHook()
import tensorflow_probability as tfp
from scipy.optimize import curve_fit
from scipy.stats import beta
tf_logging = get_logger()
class MCDropoutHook(EveryNEpochsTFModelHook):
def __init__(self,
model,
period,
time_reference,
dirName,
datasets_keys=[VALIDATION,TEST],
posterior_samples = 2500,
n_batches = -1
):
super().__init__(model, period, time_reference, dataset_keys=datasets_keys, dirName=dirName + '/mc_dropout')
self._default_plot_bool = False
self._parameters_list = self._model.dataset._parameters_list
self._n_batches = n_batches
self._posterior_samples = posterior_samples
self.calibrated_value_Aleatoric = {}
tf_logging.info("Create mcDropoutHook for: \n" + \
", ".join(datasets_keys)+"\n")
def do_when_triggered(self, run_context, run_values):
time_ref = self._time_ref
time_ref_str = self._time_ref_shortstr
tf_logging.info("trigger for mcDropoutHook")
for ds_key in self._datasets_keys:
fileName = "mc_"+str(ds_key) + "_" + time_ref_str + "_" + str(time_ref).zfill(4)
self.calibrated_value_Aleatoric = self._calculate_mc_dropout_calibrate(run_context.session, ds_key, fileName)
self._calculate_mc_dropout(run_context.session, ds_key, fileName)
tf_logging.info("finished with %s"%ds_key)
def _calculate_mc_dropout_calibrate(self, session, ds_key, baseName, is_training_value=False):
if type(session).__name__ != 'Session':
raise Exception("I need a raw session to evaluate metric over dataset.")
dataset_initializer = self._ds_initializers[ds_key]
dataset_handle = self._ds_handles[ds_key]
handle = self._ds_handle
model = self._model
parameters_list = self._parameters_list
# labels_min = self._labels_min
# labels_max = self._labels_max
# with open(self._create_name('max-min_info', baseName)+'.dat', 'w') as ft5:
# ft5.write("min_params max_params\n")
# ft5.write("{} {} \n".format(labels_min, labels_max))
init_ops = dataset_initializer
session.run(init_ops)
b = 0
N_calibrated_batches=1000*self._n_batches
batch_means = []
batch_vars = []
batch_covs = []
batch_reals = []
while True:
try:
# model.raw_x is the input before any noise addition (if present), we want to make sure we get the clean batch before noise
batch_x, batch_y = session.run([model.raw_x, model.y],
feed_dict={model.is_training: is_training_value,
handle: dataset_handle,
model.n_samples_ph:1})
                # model.x is the input after noise addition (if present); feeding the clean
                # batch through model.x ensures no extra noise is added.
samples, means, vars, covs = session.run([model.prediction_sample,
model.prediction_mean,
model.prediction_variance,
model.prediction_covariance],
feed_dict={model.x: batch_x,
model.is_training: is_training_value,
handle: dataset_handle})
batch_means.append(means)
batch_vars.append(vars)
batch_covs.append(covs)
batch_reals.append(batch_y)
b+=1
if b==N_calibrated_batches:
break
except tf.errors.OutOfRangeError:
break
# np.save(self._create_name('calibration_batch_means', baseName), batch_means)
# np.save(self._create_name('calibration_batch_covs', baseName), batch_covs)
# np.save(self._create_name('calibration_batch_reals', baseName), batch_reals)
calibrated_value=self.calibrated_number(batch_covs[:-1], batch_means[:-1], batch_reals[:-1])
# sumeT, ppf_run = self.CI_calibrate(
# batch_covs,
# batch_means,
# batch_reals,
# baseName,
# alpha_calibrate=calibrated_value,
# Aleatoric=1)
# sumeT=np.array(sumeT)
# ppf_run=np.array(ppf_run)
# results_calibred=np.stack((sumeT,ppf_run)).T
# np.save(self._create_name('ci_info_calibration', baseName), results_calibred)
return calibrated_value
def _create_name(self, prefix, baseName):
return self._dirName + "/" + prefix + '_' + baseName
def CI_calibrate(self, total_covariance, mean_pred, rea_valu, baseName,alpha_calibrate=1,Aleatoric=0):
sumeT,ppf_run = self.general_ellip_counts_calibrate_core(total_covariance, mean_pred, rea_valu)
sumeT_recali,ppf_run_recali = self.general_ellip_counts_calibrate_core(total_covariance, mean_pred, rea_valu,alpha_calibrate)
fig_1 = plt.figure()
plt.scatter(ppf_run,sumeT)
plt.scatter(ppf_run_recali,sumeT_recali,label='calibrated')
line_s1 = np.arange(0.0, 1, 0.01)
plt.plot(line_s1, line_s1, 'r-', alpha=0.1)
plt.xlabel('Confidence level')
plt.ylabel('Estimated coverage probability')
plt.legend()
plt.savefig(self._create_name("calibration_{}".format(Aleatoric), baseName) + ".png")
plt.close(fig_1)
if Aleatoric:
return sumeT, ppf_run
def general_ellip_counts_calibrate(self, covariance, mean, real_values,alpha_ini=1):
shapes=np.array(mean).shape
shapes_batch=shapes[0]*shapes[1]
means=np.array(mean).reshape(shapes_batch,shapes[2])
reals=np.array(real_values).reshape(shapes_batch,shapes[2])
covas=np.array(covariance).reshape(shapes_batch,shapes[2],shapes[2])
return self.general_ellip_counts_calibrate_core(covas, means, reals,alpha_ini)
def general_ellip_counts_calibrate_core(self, covas, means, reals,alpha_ini=1):
shapes=np.array(means).shape
Inverse_covariance = np.linalg.inv(covas)
Ellip_eq = np.einsum('nl,nlm,mn->n', (reals - means), Inverse_covariance, (reals - means).T)
ppf_run=list(np.arange(0.1, 1.0, 0.035))
suma_T=[0] * len(ppf_run)
rv = chi2(df=shapes[1])
for ix, ppf in enumerate(ppf_run):
square_norm = rv.ppf(ppf)
values = Ellip_eq / (square_norm*alpha_ini)
for ids, inst in enumerate(values):
if inst <= 1:
suma_T[ix] += 1
else:
pass
return list(np.array(suma_T)/shapes[0]), list(ppf_run)
def func(self,x, a, b):
return beta.cdf(x,a,b)
def invfunc(self,x, a, b):
return beta.ppf(x,a,b)
def mininizer(self,datacov, datamean, datareal, x0, s):
sumat_Re,ppft_Re=self.general_ellip_counts_calibrate(datacov, datamean, datareal,s)
column_2_Re=np.array(sumat_Re)
column_1_Re=np.array(ppft_Re)
results_calibred_Re=np.stack((column_1_Re,column_2_Re)).T
popt1_Re, pcov1_Re = curve_fit(self.func, results_calibred_Re[:,0],results_calibred_Re[:,1])
return self.func(x0, *popt1_Re)
def calibrated_number(self,datacov, datamean, datareal):
x_val = np.linspace(0.2,1, 100)
y_val = np.linspace(0.1,3, 50)
summa=[]
for s0 in y_val:
summa.append(sum(np.abs(self.mininizer(datacov, datamean, datareal,x_val,s0)-x_val)))
return y_val[np.argmin(summa)]
def _calculate_mc_dropout(self, session, ds_key, baseName, is_training_value=False):
if type(session).__name__ != 'Session':
raise Exception("I need a raw session to evaluate metric over dataset.")
dataset_initializer = self._ds_initializers[ds_key]
dataset_handle = self._ds_handles[ds_key]
handle = self._ds_handle
model = self._model
parameters_list = self._parameters_list
init_ops = dataset_initializer
session.run(init_ops)
count_68 = 0
count_95 = 0
count_99 = 0
count_all = 0
cal_count_68 = 0
cal_count_95 = 0
cal_count_99 = 0
cal_count_all = 0
means_means = []
covs_means = []
cal_means_covs = []
means_covs = []
Total_std = []
Total_covariance=[]
cal_Total_std = []
Real_valu=[]
Batch_samples_stack_T=[]
Batch_means_stack_T=[]
Batch_covs_stack_T=[]
b = 0
while True:
batch_means = []
batch_vars = []
batch_covs = []
cal_batch_vars = []
cal_batch_covs = []
batch_samples = []
try:
# model.raw_x is the input before any noise addition (if present), we want to make sure we get the clean batch before noise
batch_x, batch_y = session.run([model.raw_x, model.y],
feed_dict={model.is_training: is_training_value,
handle: dataset_handle,
model.n_samples_ph:1})
                # model.x is the input after noise addition (if present); feeding the clean
                # batch through model.x ensures no extra noise is added.
for _ in range(self._posterior_samples):
samples, means, varss, covs = session.run([model.prediction_sample,
model.prediction_mean,
model.prediction_variance,
model.prediction_covariance],
feed_dict={model.x: batch_x,
model.is_training: is_training_value,
handle: dataset_handle})
##calibration###
cal_varss = varss#*calibrated_valued
cal_covs = covs#*calibrated_valued
# ssa=[]
# for i in range(means.shape[0]):
# ssa.append(np.random.multivariate_normal(means[i,:], cal_covs[i,:,:]))
# cal_samples = np.array(ssa)
# ####end
batch_means.append(means)
batch_vars.append(varss)
batch_covs.append(covs)
# cal_batch_vars.append(cal_varss)
# cal_batch_covs.append(cal_covs)
batch_samples.append(samples)##calibr
batch_means_stack = np.stack(batch_means, axis=2)
batch_vars_stack = np.stack(batch_vars, axis=2)
batch_covs_stack = np.stack(batch_covs, axis=3)
# cal_batch_vars_stack = np.stack(cal_batch_vars, axis=2)
# cal_batch_covs_stack = np.stack(cal_batch_covs, axis=3)
batch_samples_stack = np.stack(batch_samples, axis=2)
coverage_value_68, coverage_value_95, coverage_value_99, coverage_all, \
total_std, cov_pred_p, mean_covar_p, total_covariance, rea_valu, mean_pred = self.CI(
batch_means_stack,
batch_y,
batch_vars_stack,
batch_covs_stack,
baseName = baseName)
# cal_coverage_value_68, cal_coverage_value_95, cal_coverage_value_99, cal_coverage_all, \
# cal_total_std, cal_cov_pred_p, cal_mean_covar_p, cal_total_covariance, cal_rea_valu, cal_mean_pred = self.CI(
# batch_means_stack,
# batch_y,
# cal_batch_vars_stack,
# cal_batch_covs_stack,
# baseName = "cal_"+baseName,
# alpha_calibrate=calibrated_valued)
# these are same for calibrated and uncalibrated
means_means.append(mean_pred)
covs_means.append(cov_pred_p)
# this changes
means_covs.append(mean_covar_p)
# cal_means_covs.append(cal_mean_covar_p)
Total_std.append(total_std)
Total_covariance.append(total_covariance)
# cal_Total_std.append(cal_total_std)
Real_valu.append(rea_valu)
Batch_samples_stack_T.append(batch_samples_stack)
Batch_means_stack_T.append(batch_means_stack)
Batch_covs_stack_T.append(batch_covs_stack)
count_68 += coverage_value_68
count_95 += coverage_value_95
count_99 += coverage_value_99
count_all += coverage_all
# cal_count_68 += cal_coverage_value_68
# cal_count_95 += cal_coverage_value_95
# cal_count_99 += cal_coverage_value_99
# cal_count_all += cal_coverage_all
b+=1
if b==self._n_batches:
break
except tf.errors.OutOfRangeError:
break
means_means = np.stack(means_means[:-1], axis=0)
covs_means = np.stack(covs_means[:-1], axis=0)
means_covs = np.stack(means_covs[:-1], axis=0)
Total_std = np.stack(Total_std[:-1], axis=0)
Total_covariance = np.stack(Total_covariance[:-1], axis=0)
# cal_means_covs = np.stack(cal_means_covs, axis=0)
# cal_Total_std = np.stack(cal_Total_std, axis=0)
Real_valu = np.stack(Real_valu[:-1], axis=0)
Batch_samples_stack = np.stack(Batch_samples_stack_T[:-1], axis=0)
Batch_means_stack = np.stack(Batch_means_stack_T[:-1], axis=0)
Batch_covs_stack = np.stack(Batch_covs_stack_T[:-1], axis=0)
np.save(self._create_name('means_means', baseName), means_means)
np.save(self._create_name('covs_means_', baseName), covs_means)
np.save(self._create_name('means_covs_', baseName), means_covs)
np.save(self._create_name('Total_std_', baseName), Total_std)
#np.save(self._create_name('cal_means_covs_', baseName), cal_means_covs)
#np.save(self._create_name('cal_Total_std_', baseName), cal_Total_std)
np.save(self._create_name('Real_valu_', baseName), Real_valu)
np.save(self._create_name('batch_samples_', baseName), Batch_samples_stack)
np.save(self._create_name('Batch_means_stack_', baseName), Batch_means_stack)
np.save(self._create_name('Batch_covs_stack_', baseName), Batch_covs_stack)
cal_count_68, cal_count_95, cal_count_99, \
calibrated_valued = self.find_calibration( Total_covariance, means_means, Real_valu,baseName)
cal_samples= self.samples_calibrated(Batch_means_stack,Batch_covs_stack)
np.save(self._create_name('cal_Batch_samples_stack', baseName), cal_samples)
with open(self._create_name('ci_info', baseName)+'.dat', 'w') as ft1:
ft1.write("count_68 count_95 count_99 count_all calibration\n")
ft1.write("{} {} {} {} {}\n".format(count_68 , count_95, count_99, count_all, self.calibrated_value_Aleatoric))
ft1.write("{} {} {} {} {}\n".format(cal_count_68 , cal_count_95, cal_count_99, count_all, calibrated_valued))
# batch_samples_stack = np.array(list(map(lambda x,y: np.random.multivariate_normal(mean = x, cov = y), batch_means_stack, batch_covs_stack)))
try:
GTC = pygtc.plotGTC(chains=[np.transpose(batch_samples_stack[0])], figureSize=5, nContourLevels=2, sigmaContourLevels=True,
paramNames = parameters_list, plotName = self._create_name("fullGTC", baseName) + '.pdf')
plt.close()
except Exception as e:
tf_logging.error(" an Error occurred with plotGTC, continuing training... \n")
tf_logging.error(traceback.format_exc())
def samples_calibrated(self,means_stack,covs_stack,alpha_calibrate=1):
ssa1=[]
shap=means_stack.shape
shap_0=shap[0]*shap[1]
means_stack_reshaped=means_stack.reshape(shap_0,shap[2],shap[3])
covariance_stack_reshaped=covs_stack.reshape(shap_0,shap[2],shap[2],shap[3])
covariance_stack_reshaped=covariance_stack_reshaped*alpha_calibrate
for i in range(shap_0):
ssa=[]
for j in range(shap[3]):
ssa.append(np.random.multivariate_normal(means_stack_reshaped[i,:,j], covariance_stack_reshaped[i,:,:,j]))
ssa1.append(ssa)
cal_samples = np.stack(ssa1, axis=2).T
return cal_samples
def CI(self, predictions, rea_valu, variance_s, covariance_s, baseName,alpha_calibrate=1):
# rea_valu=denormalize(rea_valu)
batch_size = rea_valu.shape[0]
mean_pred = np.mean(predictions, axis=2)
var_pred = np.var(predictions, axis=2)
# covariance over parameters only, for each example in the batch
cov_pred = np.array(list(map(lambda x: np.cov(x), predictions)))
mean_var = np.mean(variance_s, axis=2)
mean_covar = np.mean(covariance_s, axis=3)
total_variance = var_pred + mean_var
total_covariance = cov_pred + mean_covar
total_std = np.sqrt(total_variance)
sume68,sume95, sume99 = self.ellip_counts_3_sigmas(total_covariance, mean_pred, rea_valu, alpha_calibrate)
sumeT = batch_size #np.logical_and(rea_valu > confiden_inter_T_min, rea_valu < confiden_inter_T_max)
fig_1 = plt.figure()
for i, param_name in enumerate(self._parameters_list):
plt.errorbar(rea_valu[:, i], mean_pred[:, i], total_std[:, i], fmt='o', #color=colors[param_name], ecolor=ecolor[param_name],
elinewidth=3, capsize=0, label=param_name)
line_s1 = np.arange(0.0, 1, 0.01)
plt.plot(line_s1, line_s1, 'r-', alpha=0.1)
plt.xlabel('True value')
plt.ylabel('Predicted value')
plt.legend()
plt.savefig(self._create_name("correlation", baseName) + ".png")
plt.close(fig_1)
return sume68, sume95, sume99, sumeT,\
total_std, cov_pred, mean_covar, total_covariance, rea_valu, mean_pred
def ellip_counts_3_sigmas(self, covariance, mean, rea_values, alpha_calibrate):
sume68 = self.general_ellip_counts(covariance, mean, rea_values, alpha_calibrate ,nstd=1)
sume95 = self.general_ellip_counts(covariance, mean, rea_values, alpha_calibrate ,nstd=2)
sume99 = self.general_ellip_counts(covariance, mean, rea_values, alpha_calibrate ,nstd=3)
return sume68, sume95,sume99
def find_calibration(self, covariance, means,real,baseName):
shap=means.shape
means_reshaped=means.reshape(shap[0]*shap[1],shap[2])
real_reshaped=real.reshape(shap[0]*shap[1],shap[2])
covariance_reshaped=covariance.reshape(shap[0]*shap[1],shap[2],shap[2])
calibrated_value= self.calibrated_number(covariance, means, real)
summe68, summe95,summe99 = self.ellip_counts_3_sigmas( covariance_reshaped, means_reshaped, real_reshaped, alpha_calibrate=calibrated_value)
self.CI_calibrate(covariance_reshaped, means_reshaped, real_reshaped, baseName,alpha_calibrate=calibrated_value)
return summe68, summe95,summe99,calibrated_value
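    # general_ellip_counts: number of true target vectors that fall inside the
    # prediction's confidence ellipsoid, i.e. (y - mu)^T Sigma^{-1} (y - mu)
    # below the chi-square quantile for the requested sigma level, optionally
    # rescaled by the calibration factor alpha_calibrate.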
def general_ellip_counts(self, covariance, mean, real_values, alpha_calibrate=1,nstd=1):
Inverse_covariance = np.linalg.inv(covariance)
Ellip_eq = np.einsum('nl,nlm,mn->n', (real_values - mean), Inverse_covariance, (real_values - mean).T)
if nstd == 1:
ppf = 0.68
if nstd == 2:
ppf = 0.95
if nstd == 3:
ppf = 0.997
rv = chi2(df=mean.shape[1])
square_norm = rv.ppf(ppf)
values = Ellip_eq / (square_norm*alpha_calibrate)
suma_T = 0
for ids, inst in enumerate(values):
if inst <= 1:
suma_T += 1
# print(ids, inst)
else:
pass
return suma_T
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
slow, fast = head,head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
break
if not fast or not fast.next:
return
i = head
while i != slow:
i = i.next
slow = slow.next
return i
# # find the first meet point
# slowPtr = head
# fastPtr = head
# zPtr = None
# while slowPtr and fastPtr:
# slowPtr = slowPtr.next
# if fastPtr.next is None or fastPtr.next.next is None:
# return None
# else:
# fastPtr = fastPtr.next.next
# if slowPtr == fastPtr:
# zPtr = slowPtr
# break
# # find K by the condition
# # K+Z = -(2n-m)Y, m>n
# kPtr = head
# while kPtr:
# if kPtr == zPtr:
# return kPtr
# kPtr = kPtr.next
# zPtr = zPtr.next
# return None
|
from .model import HighResNet3D
|
from django.contrib import admin
from .models import Photo, PhotoMetaData
class PhotoMetaDataInline(admin.StackedInline):
    model = PhotoMetaData
class PhotoAdmin(admin.ModelAdmin):
list_display = ['__str__', 'upload_date', 'marker']
inlines = [PhotoMetaDataInline]
admin.site.register(Photo, PhotoAdmin)
|
import math
import pyorama
from pyorama import app
from pyorama.asset import *
from pyorama.core import *
from pyorama.event import *
from pyorama.graphics import *
from pyorama.math import *
def on_window_event(event, *args, **kwargs):
if event["sub_type"] == WINDOW_EVENT_TYPE_CLOSE:
app.trigger_quit()
width = 800
height = 600
title = b"Cubes"
base_path = b"./examples/005_texture/"
vs_source_path = base_path + b"shaders/vs_mesh.sc"
fs_source_path = base_path + b"shaders/fs_mesh.sc"
image_path = base_path + b"textures/capsule.jpg"
counter = 0
at = Vec3(0.0, 0.0, 0.0)
eye = Vec3()
up = Vec3(0.0, 1.0, 0.0)
view_mat = Mat4()
proj_mat = Mat4()
model_mat = Mat4()
app.init()
vertex_format = BufferFormat([
(b"a_position", 3, BUFFER_FIELD_TYPE_F32),
(b"a_texcoord0", 2, BUFFER_FIELD_TYPE_F32),
])
vertex_layout = VertexLayout.init_create(
vertex_format,
normalize={b"a_color0",},
)
vertices = Buffer(vertex_format)
vertices.init_from_list([
(-400, -300, 0.0, 0.0, 0.0),
(+400, -300, 0.0, 1.0, 0.0),
(+400, +300, 0.0, 1.0, 1.0),
(-400, +300, 0.0, 0.0, 1.0),
])
vertex_buffer = VertexBuffer.init_create(vertex_layout, vertices)
index_format = BufferFormat([
(b"a_indices", 1, BUFFER_FIELD_TYPE_U16),
])
indices = Buffer(index_format)
indices.init_from_list([
0, 1, 2, 0, 2, 3
], is_flat=True)
index_layout = INDEX_LAYOUT_U16
index_buffer = IndexBuffer.init_create(index_layout, indices)
queue = AssetQueue.init_create()
queue.add_asset(ASSET_TYPE_IMAGE, b"capsule", image_path)
queue.load()
asset_manager = app.get_asset_system()
image = Image()
asset_manager.get_asset(b"capsule", image)
texture = Texture.init_create_2d_from_image(image)
sampler = Uniform.init_create(b"s_tex0", UNIFORM_TYPE_SAMPLER)
vertex_shader = Shader.init_create_from_source_file(SHADER_TYPE_VERTEX, vs_source_path)
fragment_shader = Shader.init_create_from_source_file(SHADER_TYPE_FRAGMENT, fs_source_path)
program = Program.init_create(vertex_shader, fragment_shader)
window = Window.init_create(width, height, title)
frame_buffer = FrameBuffer.init_create_from_window(window)
view = View.init_create()
on_window_listener = Listener.init_create(EventType._WINDOW, on_window_event, None, None)
Vec3.set_data(eye, 0, 0, 1000)
Mat4.look_at(view_mat, eye, at, up)
Mat4.orthographic(proj_mat, 1.0/2.0, 1.0/2.0, 0.01, 1000.0)
"""
void mtxOrtho(float* _result, float _left, float _right, float _bottom, float _top, float _near, float _far, float _offset, bool _homogeneousNdc, Handness::Enum _handness)
{
const float aa = 2.0f/(_right - _left);
const float bb = 2.0f/(_top - _bottom);
const float cc = (_homogeneousNdc ? 2.0f : 1.0f) / (_far - _near);
const float dd = (_left + _right )/(_left - _right);
const float ee = (_top + _bottom)/(_bottom - _top );
const float ff = _homogeneousNdc
? (_near + _far)/(_near - _far)
: _near /(_near - _far)
;
memSet(_result, 0, sizeof(float)*16);
_result[ 0] = aa;
_result[ 5] = bb;
_result[10] = cc;
_result[12] = dd + _offset;
_result[13] = ee;
_result[14] = ff;
_result[15] = 1.0f;
}
@staticmethod
cdef void c_orthographic(Mat4C *out, float x_mag, float y_mag, float z_near, float z_far) nogil:
out.m00 = 1.0/x_mag
out.m01 = 0
out.m02 = 0
out.m03 = 0
out.m10 = 0
out.m11 = 1.0/y_mag
out.m12 = 0
out.m13 = 0
out.m20 = 0
out.m21 = 0
out.m22 = 2.0/(z_near - z_far)
out.m23 = 0
out.m30 = 0
out.m31 = 0
out.m32 = (z_far + z_near) / (z_near - z_far)
out.m33 = 1
"""
#print(proj_mat.data)
#Mat4.orthographic_alt(proj_mat, 0, width, 0, height, 0.01, 1000.0)
#print(proj_mat.data)
Mat4.perspective(proj_mat, math.radians(60.0), float(width) / float(height), 0.01, 1000.0)
print(proj_mat.data)
#print("")
clear_flags = VIEW_CLEAR_COLOR | VIEW_CLEAR_DEPTH
view.set_clear(clear_flags, 0x443355FF, 1.0, 0)
view.set_rect(0, 0, width, height)
view.set_frame_buffer(frame_buffer)
view.set_vertex_buffer(vertex_buffer)
view.set_index_buffer(index_buffer)
view.set_program(program)
view.set_texture(sampler, texture, 0)
view.set_transform_model(model_mat)
view.set_transform_view(view_mat)
view.set_transform_projection(proj_mat)
view.submit()
app.run()
on_window_listener.delete()
program.delete()
fragment_shader.delete()
vertex_shader.delete()
vertex_buffer.delete(); vertex_layout.delete(); vertices.free()
index_buffer.delete(); indices.free()
view.delete()
frame_buffer.delete()
window.delete()
app.quit() |
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import requests
import shutil
def setup():
# needs a geckodriver.exe in the same folder as a script,
# as well as a firefox exe in the specified location
options = Options()
options.binary_location = (
"C:\\Users\\KARNAV\\AppData\\Local\\Firefox Developer Edition\\firefox.exe"
)
driver_object = webdriver.Firefox(options=options)
driver_object.implicitly_wait(0.5)
# for convenience, runs fine in headless too
driver_object.maximize_window()
return driver_object
def toggle_animation(driver):
toggle = wait(driver, 10).until(
EC.element_to_be_clickable((By.CLASS_NAME, "uom-playpause"))
)
toggle.click()
def download_image(image_url, time_period, measurement):
filename = (
"./pos_images/" + str(time_period) + str(measurement) + image_url.split("/")[-1]
)
r = requests.get(image_url, stream=True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename, "wb") as f:
shutil.copyfileobj(r.raw, f)
print("Image sucessfully downloaded: ", filename)
else:
print("Image couldn't be retreived")
def grab_image(driver):
image_element = driver.find_element(By.CLASS_NAME, "uom-image")
image_url = image_element.get_attribute("src")
return image_url
def scrape(driver, time_period, measurement):
driver.get(
f"http://www.buildyourownearth.com/byoe.html?e1={time_period}&c1={measurement}&v=pm"
)
toggle_animation(driver)
for month in range(12):
download_image(grab_image(driver), time_period, measurement)
toggle_animation(driver)
sleep(0.5)
toggle_animation(driver)
def main():
current_day = 0
last_glacial = 33
late_permian = 39
mean_temp = 0
# get the data from source
driver = setup()
scrape(driver, current_day, mean_temp)
scrape(driver, last_glacial, mean_temp)
scrape(driver, late_permian, mean_temp)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
#
# dewadl
#
# Turn WADL XML into Python API.
#
# Matt Kubilus 2015
#
# This is written to support the uDeploy WADL specifically. Your mileage may vary with other WADLs.
#
#
import os
import re
import cmd
import sys
import time
import json
import urlparse
import urllib2
from types import FunctionType
import xml.etree.ElementTree as ET
from functools import partial
from optparse import OptionParser
import ConfigParser
import pprint
import getpass
DEBUG=False
class wadl_processor(object):
ns={"ns":"http://wadl.dev.java.net/2009/02"}
base_url = ""
def __init__(self, wadl_file=None, wadl_url=None, userid=None, passwd=None):
if wadl_url:
# If we were supplied wadl_url, first we may need to authenticate in order to get the WADL file
self.__auth(wadl_url, userid, passwd)
wadl_string = self.__do_url(wadl_url)
self.__process_wadl(wadl_file=wadl_file, wadl_string=wadl_string)
else:
# If we have a supplied wadl_file, we will need to get the base_url from the file before auth
self.__process_wadl(wadl_file=wadl_file)
self.__auth(self.base_url, userid, passwd)
def __auth(self, url, userid=None, passwd=None):
if userid:
if not passwd:
passwd = getpass.getpass()
p = urlparse.urlparse(url)
auth_url = "%s://%s" % (p.scheme, p.netloc)
if DEBUG:
print "Authenticating to %s" % auth_url
connected = False
for i in range(5):
try:
p_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
p_mgr.add_password(None, auth_url, userid, passwd)
auth_handler = urllib2.HTTPBasicAuthHandler(p_mgr)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
except urllib2.URLError:
print "Error connecting"
time.sleep(i)
continue
connected = True
if DEBUG:
print "Successfully authenticated."
break
if not connected:
print "Could not connect to: %s" % url
sys.exit(1)
def __do_url(self, url, mtype="GET", data_dict=None):
myurl = "/".join(x.strip('/') for x in [self.base_url, url])
myurl = myurl.lstrip("/")
req = urllib2.Request(myurl, json.dumps(data_dict))
req.get_method = lambda: mtype
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, err:
print "Error %sting url: %s" % (mtype, myurl)
print err
return
con_type = response.info().getheader('Content-Type')
resp_data = response.read()
if resp_data and "application/json" in con_type :
#return json.loads(resp_data)
return json.loads(resp_data, object_hook=wadl_to_obj)
elif resp_data:
return resp_data
def __method_creator(cls, url, mtype, params):
if DEBUG:
print "Creating method: ", url, mtype, params
def method_template(*args, **kwds):
data_dict = kwds.get("data_dict")
if DEBUG:
print "PARAMS:", params
print "ARGS:", args
print "KWDS:", kwds
print "URL:", url
print "DATADICT:", data_dict
arglen = len(args)
m = re.findall("{(.*?)}", url)
if arglen != len(params):
print "Requires %s arguments(s) %s" % (len(params), params)
return
do_url = url
#for idx in xrange(arglen):
for idx in xrange(len(m)):
# First replace REST positional arguments
do_url = do_url.replace("{%s}" % m[idx], args[idx])
url_args = '&'.join([ "%s=%s" % (k,v) for k,v in zip(params[len(m):],args[len(m):])])
do_url = do_url.replace("//","/")
if url_args:
do_url = "%s?%s" % (do_url, url_args)
return cls.__do_url(do_url, mtype, data_dict=data_dict)
return method_template
def __handleResources(self, resources):
if DEBUG:
print resources
self.base_url = resources.get("base")
if DEBUG:
print "Setting base_url to: %s" % self.base_url
for resource in resources:
self.__handleResource(resource)
def __handleResource(self, resource, path=""):
if DEBUG:
print "resource", resource.tag, resource.get('path')
prev_path = path
path = '/'.join([path, resource.get('path')])
params = re.findall("{(.*?)}", path)
method=None
for node in resource:
# We have to assume params come before methods
if node.tag == "{%s}method" % self.ns.get('ns'):
mtype, method, method_params = self.__handleMethod(node, path)
if hasattr(self, method):
# We have clashed with an existing method name
# TODO: After we process the entire file, perhaps cleanup original clashed name
basename = os.path.basename(prev_path)
if DEBUG:
print "RESOURCE: ", prev_path
print "Method %s already exists. Adjusting name to %s" % (method, "%s_%s" % (basename, method))
old_method_t = getattr(self, method)
method = "%s_%s" % (basename, method)
old_method_name = "%s_%s" % (os.path.basename(old_method_t.__prev_path), old_method_t.__name__)
if DEBUG:
print "Also updating %s to %s" % (old_method_t.__name__, old_method_name)
setattr(self, old_method_name, old_method_t)
params.extend(method_params)
#print "Create method for %s" % path
tmethod = self.__method_creator(path, mtype, tuple(params))
tmethod.__doc__ = "%s accepts arguments: %s" % (method, params)
tmethod.__name__ = method
tmethod.__prev_path = prev_path
setattr(self, method, tmethod)
#params = []
if node.tag == "{%s}param" % self.ns.get('ns'):
param = self.__handleParam(node, path)
#params.append(param)
if node.tag == "{%s}resource" % self.ns.get('ns'):
self.__handleResource(node, path)
def __handleRequest(self, request):
if DEBUG:
print " ", request.tag
tparams = []
for node in request:
if node.tag == "{%s}param" % self.ns.get('ns'):
param = self.__handleParam(node, "")
if param:
tparams.append(param)
return tparams
def __handleResponse(self, response):
pass
def __handleMethod(self, method, path):
if DEBUG:
print " ", method.tag, method.get('id')
method_type = method.get('name')
method_name = method.get('id')
method_params = []
for node in method:
if node.tag == "{%s}request" % self.ns.get('ns'):
tparams = self.__handleRequest(node)
method_params.extend(tparams)
elif node.tag == "{%s}response" % self.ns.get('ns'):
self.__handleResponse(node)
return method_type, method_name, method_params
def __handleParam(self, param, path):
if DEBUG:
print " ", param.tag, param.get('name'), param.get('type'), param.get('style')
p = None
if param.get('style') == 'template':
p = param.get('name')
return p
def __process_wadl(self, wadl_file=None, wadl_string=None):
if wadl_file:
tree = ET.parse(wadl_file)
root = tree.getroot()
elif wadl_string:
root = ET.fromstring(wadl_string)
else:
print "Must provide either wadl_file or wadl_string"
return 1
#print root.tag
m = re.match("\{(.*)\}application", root.tag)
if m:
self.ns['ns'] = m.groups()[0]
#print "Setting namespace to: %s" % self.ns.get('ns')
for resources in root.findall('{%s}resources' % self.ns.get('ns')):
self.__handleResources(resources)
def call_method(obj, args):
    if len(args) >= 1:
        meth_name = args[0]
    else:
        meth_name = "help"
    params = args[1:]
meths = [method for method in dir(obj) if callable(getattr(obj, method)) and not method.startswith('__')]
if meth_name == "help":
print "------------------"
print "Available methods:"
print "------------------"
for meth in meths:
print meth,
do_method = getattr(obj, meth)
argc = do_method.func_code.co_argcount
print do_method.func_code.co_varnames[1:argc]
print " ", do_method.__doc__
print
return
if meth_name in meths:
do_method = getattr(obj, meth_name)
return do_method(*params)
else:
print "Could not find: %s", meth_name
def wadl_to_obj(d):
tmpobj = _wadl_obj(d)
return tmpobj
class _wadl_obj(dict):
def __init__(self, data):
for key, value in data.iteritems():
setattr(self, key, value)
self.__dict__[key] = value
def __setattr__(self, name, value):
if not hasattr(super(_wadl_obj, self), name):
super(_wadl_obj, self).__setitem__(name, value)
def get_config():
config = ConfigParser.ConfigParser()
config.read([".dewadl", "/etc/dewadl.cfg", os.path.expanduser("~/.dewadl")])
#print config._sections
all_defaults = config._sections
return all_defaults.get("dewadl", {})
if __name__ == "__main__":
cfg_defaults = get_config()
parser = OptionParser()
parser.add_option(
"-f",
"--wadlfile",
action="store",
dest="wadlfile",
default=None
)
parser.add_option(
"-w",
"--weburl",
action="store",
dest="weburl",
default=cfg_defaults.get("weburl")
)
parser.add_option(
"-u",
"--userid",
action="store",
dest="userid",
default=cfg_defaults.get("userid")
)
parser.add_option(
"-p",
"--password",
action="store",
dest="password",
default=cfg_defaults.get("password")
)
parser.add_option(
"-i",
"--interact",
action="store_true",
dest="interact",
default=False
)
opts, args = parser.parse_args()
if opts.wadlfile:
wadl = wadl_processor(wadl_file=opts.wadlfile, userid=opts.userid, passwd=opts.password)
elif opts.weburl:
wadl = wadl_processor(wadl_url=opts.weburl, userid=opts.userid, passwd=opts.password)
else:
parser.error("Please provider either --wadlfile or --weburl")
if opts.interact:
import rlcompleter
import readline
import code
import sys
readline.parse_and_bind('tab: complete')
sys.ps1 = "W) "
sys.ps2 = ". "
vars = globals().copy()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact(banner="\n\n-----------------------------\n\nWelcome to DeWADL Python interface!.\n'wadl' object has been created.\n")
sys.exit(0)
ret = call_method(wadl, args)
if ret:
pprint.pprint(ret)
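# Example invocations (the host URL and script name are assumptions; "help"
# lists the methods generated from the WADL):
#   python dewadl.py -w https://udeploy.example.com/rest/application.wadl -u admin help
#   python dewadl.py -f application.wadl -i     # drop into an interactive shell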
|
from pathlib import Path
from django.dispatch import receiver
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils._os import to_path
from django.utils.autoreload import (
autoreload_started,
file_changed,
is_django_path,
)
def get_template_directories():
# Iterate through each template backend and find
# any template_loader that has a 'get_dirs' method.
# Collect the directories, filtering out Django templates.
items = set()
for backend in engines.all():
if not isinstance(backend, DjangoTemplates):
continue
items.update(Path.cwd() / to_path(dir) for dir in backend.engine.dirs)
for loader in backend.engine.template_loaders:
if not hasattr(loader, "get_dirs"):
continue
items.update(
Path.cwd() / to_path(directory)
for directory in loader.get_dirs()
if not is_django_path(directory)
)
return items
def reset_loaders():
for backend in engines.all():
if not isinstance(backend, DjangoTemplates):
continue
for loader in backend.engine.template_loaders:
loader.reset()
@receiver(autoreload_started, dispatch_uid="template_loaders_watch_changes")
def watch_for_template_changes(sender, **kwargs):
for directory in get_template_directories():
sender.watch_dir(directory, "**/*")
@receiver(file_changed, dispatch_uid="template_loaders_file_changed")
def template_changed(sender, file_path, **kwargs):
for template_dir in get_template_directories():
if template_dir in file_path.parents:
reset_loaders()
return True
|
# -*- coding: utf-8 -*-
"""
斗地主算法包
"""
from .call_landlord import process
__all__ = ['process']
|
from abc import ABCMeta, abstractmethod
import torch.nn as nn
class DiscriminatorBase(nn.Module, metaclass=ABCMeta):
@abstractmethod
def __init__(self):
super(DiscriminatorBase, self).__init__()
@abstractmethod
def forward(self,x):
pass
@abstractmethod
def get_reward(self):
pass
@abstractmethod
def train_network(self):
pass
class Discriminator(DiscriminatorBase):
def __init__(self):
super(Discriminator, self).__init__()
def name(self):
return self.__class__.__name__.lower()
def get_reward(self):
pass
    def forward(self,x):
        pass
    def train_network(self):
        # stub so the concrete class satisfies the abstract interface
        pass
def network_init(self):
for layer in self.modules():
if isinstance(layer, nn.Linear):
nn.init.orthogonal_(layer.weight)
layer.bias.data.zero_() |
import requests
class Gemini:
def __init__(self, fiat, crypto):
self.url = 'https://api.gemini.com/v1/'
self.fiat = fiat
self.crypto = crypto
self.rate = crypto.lower() + fiat.lower()
self.deposit = 0.00 # EUR
self.fiat_withdrawal = 0.09 # EUR
self.withdrawal_map = {'BTC': 0.001,
'ETH': 0.005,
'XRP': 0.02,
'XLM': 0.00002,
'LTC': 0.02}
def get_market_rate(self):
market_rate = requests.get(self.url + "pubticker/" + self.rate ).json()
self.normalised_rates = {self.crypto: {'ask':{}, 'bid':{}}}
self.normalised_rates[self.crypto]['ask'] = float(market_rate['ask'])
self.normalised_rates[self.crypto]['bid'] = float(market_rate['bid'])
return self.normalised_rates
def get_fees(self):
"""
:return:
"""
return {'deposit': self.deposit, 'fiat_withdrawal': self.fiat_withdrawal, 'crypto_withdrawal': self.withdrawal_map[self.crypto]}
if __name__ == '__main__':
print(Gemini('GBP', 'ETH').get_fees()) |
import re
import pytest
import yapic.di
from yapic.di import Injector, ProvideError, InjectError, __version__
def test_provide_callable():
""" Test only providable callables """
# !!! THIS IS NOT AN EXAMPLE, THIS IS A BAD USAGE, BUT IT CAN WORK IN TEST !!!
injector = Injector()
@injector.provide
def normalfn():
pass
with pytest.raises(ProvideError) as exc:
@injector.provide
def fn_with_args(a1):
pass
assert "Argument must have a type" in str(exc.value)
# @injector.provide
# def fn_with_defs1(a1, a2=2):
# pass
# @injector.provide
# def fn_with_defs2(a1, a2=2, a3=3):
# pass
# @injector.provide
# def fn_with_defs3(a1, a2=2, a3=3, a4=4):
# pass
@injector.provide
def fn_kwonly(*, kw1, kw2):
pass
@injector.provide
def fn_kwonly_def1(*, kw1, kw2=2):
pass
@injector.provide
def fn_kwonly_def2(*, kw1, kw2=2, kw3=3):
pass
@injector.provide
def fn_kwonly_def3(*, kw1, kw2=2, kw3=3, kw4=4):
pass
@injector.provide
def fn_mixed2(a1=1, *, kw1):
pass
@injector.provide
def fn_mixed3(a1=1, *, kw4=4):
pass
class X:
pass
class X2:
def __init__(self):
pass
class C:
def __call__(self, a1=1):
pass
class CC:
@classmethod
def cls_method(cls, arg1: X):
pass
@staticmethod
def static_method(arg1: X):
pass
injector.provide(X)
injector.provide(X2)
injector.provide(C())
injector.provide(CC.cls_method)
injector.provide(CC.static_method)
with pytest.raises(TypeError) as exc:
injector.provide(Injector.provide)
exc.match("^Cannot get type hints from built / c-extension method")
with pytest.raises(TypeError) as exc:
injector.provide(injector.provide)
exc.match("^Cannot get type hints from built / c-extension method")
def test_provide_attrs():
class A:
pass
class B:
a: A
class X:
pass
class C(B):
x: X
injector = Injector()
injector.provide(A)
injector.provide(B)
injector.provide(X)
injector.provide(C)
def test_errors():
assert isinstance(ProvideError(), TypeError)
assert isinstance(InjectError(), TypeError)
def test_version():
assert isinstance(__version__, str)
assert re.match(r"^\d+\.\d+.\d+$", __version__)
|
from ..dao.event import EventDao
from ..schema.base import ListArgsSchema, RespListSchema, RespIdSchema, RespBaseSchema
class BaseService(object):
"""Base(基础)服务,用于被继承.
CRUD基础服务类,拥有基本方法,可直接继承使用
Attributes:
auth_data: 认证数据,包括用户、权限等
user_id: 当前操作用户id
event_dao: 业务事件dao
dao: 当前业务数据处理类
"""
auth_data: dict = {}
user_id: int = 0
event_dao: EventDao
dao = None
Model = None
def __init__(self, user_id: int = 0, auth_data: dict = {}):
"""Service初始化."""
self.user_id = user_id
self.auth_data = auth_data
self.event_dao = EventDao(user_id)
def read(self, id: int) -> Model:
"""读取单条数据.
Args:
id: 数据id
Returns:
一个model实体
"""
return self.dao.read(id)
def list(self, args: ListArgsSchema) -> RespListSchema:
"""读取多条数据.
Args:
args: 列表请求参数,详见ListArgsSchema
Returns:
多个model实体组成的List
"""
return self.dao.read_list(args)
def create(self, schema) -> RespIdSchema:
"""创建一条数据.
Args:
schema: model对应的schema,详见schema中对应的实体
model: model的实体
Returns:
是否创建成功,创建成功则附加数据id
"""
resp = RespIdSchema()
model = self.Model()
self.set_model_by_schema(schema, model)
model.user_id = self.user_id
model.created_by = self.user_id
model.updated_by = self.user_id
self.dao.create(model)
event_log = self.event_dao.get_event_log(2, model.__tablename__)
        event_log.name = 'Created {}: {}'.format(model.__table_args__.get('comment', 'record'), model.name)
event_log.relation_id = model.id
self.event_dao.create_event_log(event_log, model)
resp.id = model.id
return resp
@staticmethod
def set_model_by_schema(schema, model):
"""给model赋值,从schema.
Args:
schema: model对应的schema,详见schema中对应的实体
model: model的实体
Returns:
是否创建成功,创建成功则附加数据id
"""
for (key, value) in schema:
model.__setattr__(key, value)
if hasattr(model, 'search'):
model.search = model.name
def update(self, schema) -> RespBaseSchema:
"""更新一条数据.
Args:
schema: model对应的schema,详见schema中对应的实体
model: model的实体
Returns:
是否更新成功
"""
resp = RespBaseSchema()
model = self.dao.read(schema.id)
if not model:
resp.code = 2002191527
            resp.message = 'Could not find the corresponding {}'.format(self.Model.__table_args__.get('comment', 'record'))
return resp
event_log = self.event_dao.get_event_log(1, model.__tablename__, model)
        event_log.name = 'Updated {}: {}'.format(model.__table_args__.get('comment', 'record'), model.name)
self.set_model_by_schema(schema, model)
model.updated_by = self.user_id
self.dao.update(model)
self.event_dao.create_event_log(event_log, model)
return resp
def delete(self, id: int) -> RespBaseSchema:
"""删除单条数据.
Args:
id: 数据id
Returns:
是否删除成功
"""
resp = RespBaseSchema()
model = self.dao.read(id)
if not model:
resp.code = 2002191553
            resp.message = 'Could not find the corresponding {}'.format(self.Model.__table_args__.get('comment', 'record'))
return resp
event_log = self.event_dao.get_event_log(5, model.__tablename__, model)
        event_log.name = 'Deleted {}: {}'.format(model.__table_args__.get('comment', 'record'), model.name)
self.dao.delete(model)
self.event_dao.create_event_log(event_log, model)
return resp
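# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how a concrete service is expected to inherit
# BaseService. ArticleDao and Article below are hypothetical names used only
# for this example; real subclasses plug in their own dao and Model.
#
# from ..dao.article import ArticleDao
# from ..model.article import Article
#
# class ArticleService(BaseService):
#     dao = ArticleDao()
#     Model = Article
#
# service = ArticleService(user_id=1)
# one = service.read(42)        # single model instance
# resp = service.delete(42)     # RespBaseSchema with code/message on failure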
|
# NG
import os, sys
# OK
import os
import sys
import math
import os
import sys
import requests
import my_package1
import my_package2
|
# Python-SDL2 : Yet another SDL2 wrapper for Python
#
# * https://github.com/vaiorabbit/python-sdl2
#
# [NOTICE] This is an automatically generated file.
import ctypes
from .api import SDL2_API_NAMES, SDL2_API_ARGS_MAP, SDL2_API_RETVAL_MAP
# Define/Macro
# Enum
SDL_HINT_DEFAULT = 0
SDL_HINT_NORMAL = 1
SDL_HINT_OVERRIDE = 2
# Typedef
SDL_HintPriority = ctypes.c_int
SDL_HintCallback = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p)
# Struct
# Function
def setup_symbols():
SDL2_API_NAMES.append('SDL_SetHintWithPriority')
SDL2_API_ARGS_MAP['SDL_SetHintWithPriority'] = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]
SDL2_API_RETVAL_MAP['SDL_SetHintWithPriority'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_SetHint')
SDL2_API_ARGS_MAP['SDL_SetHint'] = [ctypes.c_char_p, ctypes.c_char_p]
SDL2_API_RETVAL_MAP['SDL_SetHint'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_GetHint')
SDL2_API_ARGS_MAP['SDL_GetHint'] = [ctypes.c_char_p]
SDL2_API_RETVAL_MAP['SDL_GetHint'] = ctypes.c_char_p
SDL2_API_NAMES.append('SDL_GetHintBoolean')
SDL2_API_ARGS_MAP['SDL_GetHintBoolean'] = [ctypes.c_char_p, ctypes.c_int]
SDL2_API_RETVAL_MAP['SDL_GetHintBoolean'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_AddHintCallback')
SDL2_API_ARGS_MAP['SDL_AddHintCallback'] = [ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_AddHintCallback'] = None
SDL2_API_NAMES.append('SDL_DelHintCallback')
SDL2_API_ARGS_MAP['SDL_DelHintCallback'] = [ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_DelHintCallback'] = None
SDL2_API_NAMES.append('SDL_ClearHints')
SDL2_API_ARGS_MAP['SDL_ClearHints'] = None
SDL2_API_RETVAL_MAP['SDL_ClearHints'] = None
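# --- Hedged usage sketch (not part of the generated bindings) ---
# The .api module presumably loads libSDL2 and applies these name/argtype/restype
# tables. A rough, illustrative equivalent with plain ctypes (the library file
# name is platform dependent and assumed here) would be:
#
# lib = ctypes.CDLL("libSDL2-2.0.so.0")
# setup_symbols()
# get_hint = getattr(lib, 'SDL_GetHint')
# get_hint.argtypes = SDL2_API_ARGS_MAP['SDL_GetHint']
# get_hint.restype = SDL2_API_RETVAL_MAP['SDL_GetHint']
# print(get_hint(b"SDL_RENDER_DRIVER"))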
|
import pyrosetta.rosetta.core.scoring as scoring
import pyrosetta.toolbox.rcsb as rcsb
import numpy as np
from scipy.stats import wilcoxon
from pyrosetta import *
from jmetal.problem.singleobjective.protein_structure_prediction import ProteinStructurePredictionMultiObjective
if __name__ == '__main__':
problem = ProteinStructurePredictionMultiObjective(
protein_config_file='/home/guest1/Documents/Source/jMetalPy/resources/PSP_instances/2P81.yaml',
energy_type='centroid')
problem_custom_parameters = problem.get_custom_information()
pdb_pose = rcsb.pose_from_rcsb(problem_custom_parameters['protein'])
working_dir = "/home/guest1/Documents/Source/jMetalPy/"+problem_custom_parameters['protein']
nsga_vals = []
gde_vals = []
demo_vals = []
for i in range(0, 30):
demo_file = pose_from_pdb(working_dir + '/DEMO/' + problem_custom_parameters['protein'] + '_' + str(i) + '.pdb')
gdt_demo = scoring.CA_gdtmm(demo_file, pdb_pose)
demo_vals.append(gdt_demo)
gde_file = pose_from_pdb(working_dir + '/GDE3/' + problem_custom_parameters['protein'] + '_' + str(i) + '.pdb')
gdt_gde = scoring.CA_gdtmm(gde_file, pdb_pose)
gde_vals.append(gdt_gde)
nsga_file = pose_from_pdb(working_dir + '/NSGA-II/' + problem_custom_parameters['protein'] + '_' + str(i) + '.pdb')
gdt_nsga = scoring.CA_gdtmm(nsga_file, pdb_pose)
nsga_vals.append(gdt_nsga)
nsga = np.array(nsga_vals)
gde = np.array(gde_vals)
demo = np.array(demo_vals)
print("Mean Values")
print("NSGA", nsga.max()*100, nsga.mean()*100, nsga.std()*100)
print("GDE3", gde.max()*100, gde.mean()*100, gde.std()*100)
print("DEMO", demo.max()*100, demo.mean()*100, demo.std()*100)
print("\nWilcoxon Rank Test - GDE3 vs NSGA")
stat, p_value = wilcoxon(gde, nsga)
print("Statistics=%.4f, p=%.4f" % (stat, p_value))
if p_value > 0.05:
print("Same distribution (fail to reject H0)")
else:
print("Different Distribution (reject H0)")
print("\nWilcoxon Rank Test - GDE3 vs DEMO")
stat, p_value = wilcoxon(gde, demo)
print("Statistics=%.4f, p=%.4f" % (stat, p_value))
if p_value > 0.05:
print("Same distribution (fail to reject H0)")
else:
print("Different Distribution (reject H0)")
|
import numpy as np
import operator
# Accuracy from the testing predictions
def getAccuracy(testSet, predictions):
correct = 0
for x in range(len(testSet)):
if testSet[x] == predictions[x]:
correct += 1
return 1.0 * correct / len(testSet)
# A custom distance function for use with k-NN
def distance(instance1, instance2, k):
mm1 = instance1[0]
cm1 = instance1[1:]
dcm1 = np.linalg.det(cm1)
mm2 = instance2[0]
cm2 = instance2[1:]
dcm2 = np.linalg.det(cm2)
icm2 = np.linalg.inv(cm2)
dmm = mm2 - mm1
#
distance = np.trace(np.dot(icm2, cm1))
# Mahalanobis distance between the two instances
distance += np.sqrt(np.dot(np.dot((dmm).transpose(), icm2), dmm))
# Difference in Differential entropy between instances
# (measured indirectly as a property of Covariance matrices)
distance += np.log(dcm2) - np.log(dcm1)
# distance -= k
return distance
# A function which finds k neighbours of the given instance in the training set
def getNeighbors(trainingSet, trainingLabels, instance, k):
distances = []
for x in range(len(trainingSet)):
# Since the distance function is not symmetric, taking the distance in both directions
dist = distance(trainingSet[x], instance, k) + distance(instance, trainingSet[x], k)
distances.append((trainingLabels[x], dist))
# sorting by distance in ascending order
distances.sort(key=operator.itemgetter(1))
neighbors = [d[0] for d in distances[:k]]
return neighbors
# k-NN logic to find the nearest neighbour's class
def nearestClass(neighbors):
classVote = {}
for x in range(len(neighbors)):
response = neighbors[x]
if response in classVote:
classVote[response] += 1
else:
classVote[response] = 1
sorter = sorted(classVote.items(), key=operator.itemgetter(1), reverse=True)
return sorter[0][0]
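# --- Hedged usage sketch (illustration only, not part of the original module) ---
# The (mean, covariance) instance layout below is inferred from how distance()
# slices its arguments: row 0 is the mean vector, the remaining rows form the
# covariance matrix. The synthetic data and labels are made up for the demo.
if __name__ == "__main__":
    rng = np.random.default_rng(0)

    def make_instance(offset):
        # Build a fake (mean, covariance) instance from random 2-D feature vectors
        feats = rng.normal(offset, 1.0, size=(50, 2))
        return np.vstack([feats.mean(axis=0), np.cov(feats, rowvar=False)])

    trainingSet = [make_instance(0.0) for _ in range(5)] + [make_instance(3.0) for _ in range(5)]
    trainingLabels = [0] * 5 + [1] * 5
    query = make_instance(3.0)
    neighbors = getNeighbors(trainingSet, trainingLabels, query, k=3)
    print("predicted class:", nearestClass(neighbors))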
|
import lxml.html
from lxml.html.clean import Cleaner
from lxml.cssselect import CSSSelector
cleaner = Cleaner(
scripts=True,
javascript=True,
comments=True,
style=True,
inline_style=True,
links=True,
meta=True,
page_structure=True,
processing_instructions=True,
embedded=True,
frames=True,
forms=True,
allow_tags=['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'hr', 'li'],
remove_unknown_tags=False,
safe_attrs=[],
safe_attrs_only=True,
)
blacklist_sel = CSSSelector(', '.join(
[f'.block.{c}' for c in ['code', 'html', 'image', 'photo', 'video']]
+ ['.slide']
))
def clean_text(text: str) -> str:
parts = [part.strip() for part in text.replace('\t', '').split('\n')]
return ' '.join(part for part in parts if part)
def html_to_text(html: str) -> str:
doc = lxml.html.fromstring(html)
for kill_node in blacklist_sel(doc):
kill_node.getparent().remove(kill_node)
cleaner(doc)
paragraphs = [clean_text(paragraph) for paragraph in doc.itertext()]
return '\n'.join(paragraph for paragraph in paragraphs if paragraph)
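# --- Hedged usage sketch (illustration only, not part of the original module) ---
# A tiny self-check, assuming the module is run directly: the blacklist selector
# drops the ".block.code" container and the Cleaner strips the <script> element
# before the remaining text is flattened. The sample markup is made up.
if __name__ == "__main__":
    sample = (
        "<html><body>"
        "<h1>Title</h1>"
        "<div class='block code'><pre>print('skipped')</pre></div>"
        "<p>First\n paragraph.</p>"
        "<script>var x = 1;</script>"
        "</body></html>"
    )
    # Expected output (roughly): "Title" and "First paragraph." on separate lines
    print(html_to_text(sample))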
|
from dataclasses import dataclass, field
from typing import Optional
from bindings.csw.actuate_type import ActuateType
from bindings.csw.datum import Datum
from bindings.csw.engineering_datum import EngineeringDatum
from bindings.csw.geodetic_datum import GeodeticDatum
from bindings.csw.image_datum import ImageDatum
from bindings.csw.show_type import ShowType
from bindings.csw.temporal_datum import TemporalDatum
from bindings.csw.type_type import TypeType
from bindings.csw.vertical_datum import VerticalDatum
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class DatumRefType:
"""
Association to a datum, either referencing or containing the definition of
that datum.
"""
geodetic_datum: Optional[GeodeticDatum] = field(
default=None,
metadata={
"name": "GeodeticDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_datum: Optional[TemporalDatum] = field(
default=None,
metadata={
"name": "TemporalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_datum: Optional[VerticalDatum] = field(
default=None,
metadata={
"name": "VerticalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
image_datum: Optional[ImageDatum] = field(
default=None,
metadata={
"name": "ImageDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
engineering_datum: Optional[EngineeringDatum] = field(
default=None,
metadata={
"name": "EngineeringDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
datum: Optional[Datum] = field(
default=None,
metadata={
"name": "_Datum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
type: TypeType = field(
init=False,
default=TypeType.SIMPLE,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
href: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
role: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
arcrole: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
title: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
show: Optional[ShowType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
actuate: Optional[ActuateType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
remote_schema: Optional[str] = field(
default=None,
metadata={
"name": "remoteSchema",
"type": "Attribute",
"namespace": "http://www.opengis.net/gml",
},
)
|
# encoding: utf-8
import os
from unittest import mock
import pytest
from requests.exceptions import ChunkedEncodingError
from gdcapiwrapper.tcga import Data as TCGAData
from gdcapiwrapper.tcia import Data as TCIAData
from ..mockserver import get_free_port, start_mock_server
class TestTCGAData(object):
@classmethod
def setup_class(cls):
cls.mock_server_port = get_free_port()
start_mock_server(cls.mock_server_port)
def test_download(self, tmpdir):
base_url = "http://localhost:{port}/data".format(port=self.mock_server_port)
with mock.patch.dict(
"gdcapiwrapper.tcga.tcga.__dict__", {"base_url": base_url}
):
response, filename = TCGAData.download(
uuid="fakeuuid", path=tmpdir, name="fakefilename"
)
assert response.ok is True
assert os.path.exists(os.path.join(tmpdir, "fakefilename")) is True
def test_download_multiple(self, tmpdir):
base_url = "http://localhost:{port}".format(port=self.mock_server_port)
try:
with mock.patch.dict(
"gdcapiwrapper.tcga.tcga.__dict__", {"base_url": base_url}
):
response, filename = TCGAData.download_multiple(
uuid_list=["1", "2"], path=tmpdir
)
except ChunkedEncodingError:
pytest.skip("Flaky ConnectionResetError")
assert response.ok is True
assert os.path.exists(os.path.join(tmpdir, "fake.gzip")) is True
class TestTCIAData(object):
@classmethod
def setup_class(cls):
cls.mock_server_port = get_free_port()
start_mock_server(cls.mock_server_port)
def test_json_sop_instance_uids(self):
base_url = "http://localhost:{port}/query".format(port=self.mock_server_port)
with mock.patch.dict(
"gdcapiwrapper.tcia.tcia.__dict__", {"base_url": base_url}
):
response, json = TCIAData.sop_instance_uids(series_instance_uid="fakeuid")
assert response.ok is True
assert json == []
def test_other_formats_sop_instance_uids(self, tmpdir):
base_url = "http://localhost:{port}/query".format(port=self.mock_server_port)
with mock.patch.dict(
"gdcapiwrapper.tcia.tcia.__dict__", {"base_url": base_url}
):
response, filename = TCIAData.sop_instance_uids(
series_instance_uid="fakeuid", format_="CSV", path=tmpdir
)
assert response.ok is True
assert os.path.exists(os.path.join(tmpdir, "fakeuid.csv")) is True
def test_download_single_image(self, tmpdir):
base_url = "http://localhost:{port}/query".format(port=self.mock_server_port)
with mock.patch.dict(
"gdcapiwrapper.tcia.tcia.__dict__", {"base_url": base_url}
):
response, filename = TCIAData.download_single_image(
series_instance_uid="fakeuid",
sop_instance_uid="sopfakeuid",
path=tmpdir,
)
assert response.ok is True
assert os.path.exists(os.path.join(tmpdir, "sopfakeuid.dcm")) is True
def test_download_series_instance_images(self, tmpdir):
base_url = "http://localhost:{port}/query".format(port=self.mock_server_port)
with mock.patch.dict(
"gdcapiwrapper.tcia.tcia.__dict__", {"base_url": base_url}
):
response, filename = TCIAData.download_series_instance_images(
series_instance_uid="fakeuid", path=tmpdir
)
assert response.ok is True
assert os.path.exists(os.path.join(tmpdir, "fakeuid.zip")) is True
|
import paginate
from flask import request, url_for, render_template, jsonify
from flask.views import MethodView
from flask_login import login_required, current_user
from paginate_sqlalchemy import SqlalchemyOrmWrapper
from sqlalchemy import desc, func
from nanumlectures.common import is_admin_role, paginate_link_tag
from nanumlectures.database import db_session
from nanumlectures.models import BoardModel
class BoardListView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
current_page = request.args.get("page", 1, type=int)
search_option = request.args.get("search_option", '')
search_word = request.args.get("search_word", '')
if search_option:
search_column = getattr(BoardModel, search_option)
page_url = url_for("admin.board")
if search_word:
page_url = url_for("admin.board", search_option=search_option, search_word=search_word)
page_url = str(page_url) + "&page=$page"
else:
page_url = str(page_url) + "?page=$page"
items_per_page = 10
records = db_session.query(BoardModel)
        if search_option and search_word:  # only filter when both a column and a word are given
records = records.filter(search_column.ilike('%{}%'.format(search_word)))
records = records.order_by(desc(BoardModel.id))
total_cnt = records.count()
paginator = paginate.Page(records, current_page, page_url=page_url,
items_per_page=items_per_page,
wrapper_class=SqlalchemyOrmWrapper)
return render_template("admin/preparatory_committee/list.html", paginator=paginator,
paginate_link_tag=paginate_link_tag,
page_url=page_url, items_per_page=items_per_page,
total_cnt=total_cnt, page=current_page)
class BoardRegView(MethodView):
decorators = [is_admin_role, login_required]
def get(self):
return render_template("admin/preparatory_committee/reg.html")
def post(self):
req_json = request.get_json()
board_obj = BoardModel()
board_obj.title = req_json.get('title')
board_obj.content = req_json.get('content')
board_obj.wdate = func.now()
board_obj.mdate = func.now()
board_obj.user_id = current_user.id
board_obj.hit = 0
db_session.add(board_obj)
return jsonify(success=True)
class BoardEditView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, board):
return render_template("admin/preparatory_committee/edit.html", board=board)
def post(self, board):
req_json = request.get_json()
board.title = req_json.get('title')
board.content = req_json.get('content')
board.mdate = func.now()
return jsonify(success=True)
class BoardDetailView(MethodView):
decorators = [is_admin_role, login_required]
def get(self, board):
board.hit += 1
return render_template("admin/preparatory_committee/view.html", board=board)
def delete(self, board):
db_session.delete(board)
return jsonify(success=True)
|
# Order sensitive imports.
from genomic_neuralnet.analyses.optimization_constants \
import DROPOUT, HIDDEN, WEIGHT_DECAY, EPOCHS, RUNS, \
SINGLE_MULTIPLIER, DOUBLE_MULTIPLIER
from genomic_neuralnet.analyses.optimization_result \
import OptimizationResult
from genomic_neuralnet.analyses.optimization_runner \
import run_optimization
|
import pandas as pd
import matplotlib.pyplot as plt
raw_data = pd.read_csv("https://storage.googleapis.com/dqlab-dataset/dataset_statistic.csv", sep=';')
plt.clf()
plt.figure()
raw_data[raw_data['Produk'] == 'A'].hist()
plt.tight_layout()
plt.show()
plt.figure()
raw_data[raw_data['Produk'] == 'B'].hist()
plt.tight_layout()
plt.show()
plt.figure()
raw_data[raw_data['Produk'] == 'C'].hist()
plt.tight_layout()
plt.show()
plt.figure()
raw_data[raw_data['Produk'] == 'D'].hist()
plt.tight_layout()
plt.show()
plt.figure()
raw_data[raw_data['Produk'] == 'E'].hist()
plt.tight_layout()
plt.show() |
# Reprojecting a Vector Layer
# https://github.com/GeospatialPython/Learn/raw/master/MSCities_MSTM.zip
import processing
processing.runalg("qgis:reprojectlayer", "/qgis_data/ms/MSCities_MSTM.shp", "epsg:4326", "/qgis_data/ms/MSCities_MSTM_4326.shp") |
import re
import asyncio
import aiohttp
import os
import sys
import datetime
import tushare as ts
import pandas as pd
from stock.globalvar import TICK_DIR
from jobs.get_tick import init, run
def filter_open(date):
folder = TICK_DIR["stock"]
files = os.listdir(folder)
df_res = pd.DataFrame(columns=["up_speed", "chg", "kaipan_money", "opengap"])
tasks = []
for filename in files:
exsymbol = filename
filepath = os.path.join(folder, exsymbol)
if not os.path.isfile(filepath):
continue
df = pd.read_csv(filepath, sep='\t', header=0, names=['time', 'price', 'change', 'volume', 'amount', 'type'])
df_open = df.iloc[:2]
if len(df_open) == 0:
continue
df.loc[:, "time"] = pd.to_datetime(date + ' ' + df["time"], format="%Y-%m-%d %H:%M:%S")
open_dt = datetime.datetime.strptime(date+" 09:30:00", "%Y-%m-%d %H:%M:%S")
if df.iloc[0].time >= open_dt:
continue
yest_close = df.iloc[0].price - df.iloc[0].change
opengap = df.iloc[0].price / yest_close - 1
kaipan_money = 0
kaipan_money = df.iloc[0].amount
chg = df_open.iloc[1].price / yest_close - 1
up_speed = df_open.iloc[1].price/df_open.iloc[0].price - 1
if opengap > 0.03 and chg < 0.08:
df_res.loc[exsymbol] = [up_speed, chg, kaipan_money, opengap]
return df_res
def filter_speed(date):
folder = TICK_DIR["stock"]
files = os.listdir(folder)
df_res = pd.DataFrame(columns=["up_speed", "down_speed", "chg", "kaipan_money", "opengap"])
tasks = []
for filename in files:
exsymbol = filename
filepath = os.path.join(folder, exsymbol)
if not os.path.isfile(filepath):
continue
df = pd.read_csv(filepath, sep='\t', header=0, names=['time', 'price', 'change', 'volume', 'amount', 'type'])
df.loc[:, "time"] = pd.to_datetime(date + ' ' + df["time"], format="%Y-%m-%d %H:%M:%S")
df_5min = df[(df.time<=date+" 09:35:00") & (df.time>=date+" 09:30:00")]
if len(df_5min) == 0:
continue
yest_close = df.iloc[0].price - df.iloc[0].change
opengap = df.iloc[0].price / yest_close - 1
kaipan_money = 0
open_dt = datetime.datetime.strptime(date+" 09:30:00", "%Y-%m-%d %H:%M:%S")
if df.iloc[0].time < open_dt:
kaipan_money = df.iloc[0].amount
#df1.loc[:, "time_diff"] = (df1.time-df1.time.shift(20)) / datetime.timedelta(seconds=1)
#df1.loc[:, "price_diff"] = df1.price-df1.price.shift(20)
#df1.loc[:, "speed"] = df1.price_diff/yest_close/df1.time_diff
idxmin = df_5min.price.idxmin()
idxmax = df_5min.price.idxmax()
min_time = df_5min.loc[idxmin].time
max_time = df_5min.loc[idxmax].time
dtime = (df_5min.iloc[-1].time - min_time) / datetime.timedelta(minutes=1)
dprice = df_5min.iloc[-1].price - df_5min.loc[idxmin].price
up_speed = dprice/yest_close
dtime = (df_5min.iloc[-1].time - max_time) / datetime.timedelta(minutes=1)
dprice = df_5min.iloc[-1].price - df_5min.loc[idxmax].price
down_speed = dprice/yest_close
chg = df_5min.iloc[-1].price / yest_close - 1
#speed = df_5min.iloc[-1].speed
#chg = df_5min.iloc[-1].price/yest_close-1
if up_speed > 0.03 and down_speed == 0:
df_res.loc[exsymbol] = [up_speed, down_speed, chg, kaipan_money, opengap]
return df_res
def filter_close(date):
folder = TICK_DIR["stock"]
files = os.listdir(folder)
df_res = pd.DataFrame(columns=["up_speed", "down_speed", "chg", "amount", "opengap"])
tasks = []
for filename in files:
exsymbol = filename
filepath = os.path.join(folder, exsymbol)
if not os.path.isfile(filepath):
continue
df = pd.read_csv(filepath, sep='\t', header=0, names=['time', 'price', 'change', 'volume', 'amount', 'type'])
df.loc[:, "time"] = pd.to_datetime(date + ' ' + df["time"], format="%Y-%m-%d %H:%M:%S")
df_5min = df[(df.time<=date+" 14:55:00") & (df.time>=date+" 14:45:00")]
if len(df_5min) == 0:
continue
yest_close = df.iloc[0].price - df.iloc[0].change
opengap = df.iloc[0].price / yest_close - 1
idxmin = df_5min.price.idxmin()
idxmax = df_5min.price.idxmax()
min_time = df_5min.loc[idxmin].time
max_time = df_5min.loc[idxmax].time
dtime = (df_5min.iloc[-1].time - min_time) / datetime.timedelta(minutes=1)
dprice = df_5min.iloc[-1].price - df_5min.loc[idxmin].price
amount = df_5min.loc[idxmin:].amount.sum()
up_speed = dprice/yest_close
dtime = (df_5min.iloc[-1].time - max_time) / datetime.timedelta(minutes=1)
dprice = df_5min.iloc[-1].price - df_5min.loc[idxmax].price
down_speed = dprice/yest_close
chg = df_5min.iloc[-1].price / yest_close - 1
if up_speed > 0.02 and down_speed == 0:
df_res.loc[exsymbol] = [up_speed, down_speed, chg, amount, opengap]
return df_res
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {} <date>".format(sys.argv(0)))
sys.exit(1)
pd.set_option('display.max_rows', None)
date = sys.argv[1]
#df_res = filter_speed(date)
df_res = filter_close(date)
print(df_res.sort_values("up_speed"))
|
"""Top-level package for python-upwork."""
from upwork.config import Config
from upwork.client import Client
from . import routers
__author__ = """Maksym Novozhylov"""
__email__ = "[email protected]"
__version__ = "2.1.0"
__all__ = ("Config", "Client", "routers")
|
#from distutils.core import setup
from setuptools import setup, find_packages
install_requires = ['tinytools']
# import __version__
exec(open('dgsamples/_version.py').read())
setup(
name='dgsamples',
version=__version__,
author='Nathan Longbotham',
author_email='[email protected]',
packages=find_packages(),
description='Sample image chips and vectors that can be used for '\
'unit testing',
long_description=open('README.rst').read(),
install_requires=install_requires,
include_package_data=True
)
|
# encoding: latin2
"""clusterPy input methods
"""
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2009-10 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "[email protected]"
__all__ = ['new','load','importArcData','createPoints','createHexagonalGrid',
'createGrid','importDBF','importCSV','importShape','importGWT']
import struct
import pickle
import re
from contiguity import weightsFromAreas, fixIntersections
from layer import Layer
try:
from toolboxes import rimap as rim
__all__ += ['rimap']
except:
pass
# INDEX
# new
# load
# importArcData
# createPoints
# createGrid
# importShape
# readShape
# readPoints
# readPolylines
# readPolygons
# importDBF
# importGWT
def rimap(n, N = 30, alpha = [0.01,0.3], sigma = [1.1,1.4], dt = 0.1,
pg = 0.01, pu = 0.05, su = 0.315, boundary = ""):
"""Creates an irregular maps
:param n: number of areas
:type n: integer
:param N: number of points sampled from each irregular polygon (MR-Polygon)
:type N: integer
    :param alpha: min and max values from which alpha is sampled; default is [0.01, 0.3]
:type alpha: List
    :param sigma: min and max values from which sigma is sampled; default is [1.1, 1.4]
:type sigma: List
:param dt: time delta to be used to create irregular polygons (MR-Polygons)
:type dt: Float
:param pg: parameter to define the scaling factor of each polygon before being introduced as part of the irregular map
:type pg: Float
    :param pu: probability of increasing the number of areas of each polygon before it is introduced into the irregular map
:type pu: Float
    :param su: how much the number of areas of each polygon is increased before it is introduced into the irregular map
:type su: Float
:param boundary: Initial irregular boundary to be used into the recursive irregular map algorithm
:type boundary: Layer
:rtype: Layer
:return: RI-Map instance
**Examples** ::
import clusterpy
lay = clusterpy.rimap(1000)
lay.exportArcData("rimap_1000")
"""
rm = rim(n, N, alpha, sigma, dt, pg, pu, su, boundary)
areas = rm.carteAreas
areas = fixIntersections(areas)
Wqueen,Wrook, = weightsFromAreas(areas)
layer = Layer()
layer.areas = areas
layer.Wqueen = Wqueen
layer.Wrook = Wrook
layer.shpType = 'polygon'
layer.name = "rimap_" + str(len(areas))
layer.fieldNames = ["Id","nw"]
layer.Y = {}
for i in Wrook:
layer.Y[i] = [i,len(Wrook[i])]
return layer
def new():
"""Creates an empty Layer
**Description**
Use this function to create an empty layer. This allows the user
to create his own maps.
:rtype: Layer (new empty Layer)
**Examples** ::
import clusterpy
lay = clusterpy.new()
"""
print("Creating new layer")
layer = Layer()
print("Done")
return layer
def load(filename):
"""Load a ClusterPy project (<file>.CP)
:param filename: filename without extension
:type filename: string
:rtype: Layer
:return: CP project
**Description**
With clusterPy you can save your layer objects on a .cp file,
which can be reopened in the future using this function.
**Examples** ::
import clusterpy
lay = clusterpy.new()
lay.save("lay")
layer = clusterpy.load("lay")
"""
print("Loading cp project")
    f = open(filename + '.cp', 'rb')
layer = pickle.load(f)
f.close()
print("Done")
return layer
def importArcData(filename):
"""Creates a new Layer from a shapefile (<file>.shp)
:param filename: filename without extension
:type filename: string
:rtype: Layer (CP project)
**Description**
    `ESRI <http://www.esri.com/>`_ shapefile is a binary format used to
    store and exchange maps, and it has become one of the most widely used
    formats among spatial scientists. You can find some shapefiles in
    clusterPy's "data_examples" folder. To load a shapefile in clusterPy,
    just follow the example below.
**Example** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
"""
layer = Layer()
layer.name = filename.split('/')[-1]
print("Loading " + filename + ".dbf")
data, fields, specs = importDBF(filename + '.dbf')
print("Loading " + filename + ".shp")
if fields[0] != "ID":
fields = ["ID"] + fields
for y in list(data.keys()):
data[y] = [y] + data[y]
layer.fieldNames = fields
layer.Y = data
layer.areas, layer.Wqueen, layer.Wrook, layer.shpType = importShape(filename + '.shp')
layer._defBbox()
print("Done")
return layer
def createPoints(nRows, nCols, lowerLeft=(0,0), upperRight=(100,100)):
"""Creates a new Layer with uniformly distributed points in space
:param nRows: number of rows
:type nRows: integer
:param nCols: number of cols
:type nCols: integer
:param lowerLeft: lower-left corner coordinates; default is (0,0)
:type lowerLeft: tuple or none
:param upperRight: upper-right corner coordinates; default is (100,100)
:type upperRight: tuple or none
:rtype: Layer (new points layer)
**Description**
The example below shows how to create a point-based regular grids with clusterPy.
**Examples**
Creating a grid of ten by ten points.::
import clusterpy
points = clusterpy.createPoints(10, 10)
Creating a grid of ten by ten points on the bounding box (0,0,100,100). ::
import clusterpy
points = clusterpy.createPoints(10, 10, lowerLeft=(0, 0), upperRight=(100, 100))
"""
print("Creating points")
yMin = lowerLeft[1]
yMax = upperRight[1]
xMin = lowerLeft[0]
xMax = upperRight[0]
nyPoints = nRows
nxPoints = nCols
areaHeight = float(yMax - yMin) / nRows
areaWidth = float(xMax - xMin) / nCols
N = nyPoints*nxPoints
Y = {}
acty = yMax
actx = xMin
map = []
verticalBorderAreas = []
cont = 0
for i in range(N):
Y[i] = [i]
for i in range(nyPoints):
nexty = acty - areaHeight
for j in range(nxPoints):
nextx = actx + areaWidth
point = (actx + areaWidth / float(2), acty - areaHeight / float(2))
area = [point]
map.append([area])
actx = nextx
Y[cont].extend([point[0],point[1]])
cont = cont + 1
acty = nexty
actx = xMin
layer = Layer()
layer.Y = Y
layer.fieldNames = ['ID','X','Y']
layer.areas = map
layer.shpType = 'point'
layer.name = 'root'
layer._defBbox()
print("Done")
return layer
def createHexagonalGrid(nRows, nCols, lowerLeft=(0,0), upperRight=(100,100)):
"""Creates a new Layer with a hexagonal regular lattice
:param nRows: number of rows
:type nRows: integer
:param nCols: number of columns
:type nCols: integer
:type lowerLeft: tuple or none, lower-left corner coordinates; default is (0,0)
:type upperRight: tuple or none, upper-right corner coordinates; default is (100,100)
:rtype: Layer new lattice
**Description**
Regular lattices are widely used in both theoretical and empirical
applications in Regional Science. The example below shows how easy
the creation of this kind of maps is using clusterPy.
**Examples**
    Create a hexagonal grid of ten by ten areas.::
        import clusterpy
        points = clusterpy.createHexagonalGrid(10, 10)
    Create a hexagonal grid of ten by ten areas on the bounding box (0,0,100,100).::
        import clusterpy
        points = clusterpy.createHexagonalGrid(10, 10, lowerLeft=(0, 0), upperRight=(100, 100))
"""
print("Creating grid")
rowHeight = (upperRight[1] - lowerLeft[1])/float(nRows)
colStep = rowHeight/float(2)
N = nRows*nCols
areas = []
for row in range(nRows):
actx = lowerLeft[0]
for col in range(nCols):
if col != 0:
actx += 2*colStep
if col%2 == 1:
y0 = lowerLeft[1] + rowHeight*row - 2*rowHeight/float(2)
y1 = lowerLeft[1] + rowHeight*row - rowHeight/float(2)
y2 = lowerLeft[1] + rowHeight*row
else:
y0 = lowerLeft[1] + rowHeight*row - rowHeight/float(2)
y1 = lowerLeft[1] + rowHeight*row
y2 = lowerLeft[1] + rowHeight*row + rowHeight/float(2)
x0 = actx
x1 = actx + colStep
x2 = actx + 2*colStep
x3 = actx + 3*colStep
pol = [(x0,y1),(x1,y2),(x2,y2),
(x3,y1),(x2,y0),(x1,y0),
(x0,y1)]
areas.append([pol])
Y = {}
for i in range(N):
Y[i]=[i]
layer = Layer()
layer.Y = Y
layer.fieldNames = ['ID']
layer.areas = areas
layer.Wqueen, layer.Wrook, = weightsFromAreas(layer.areas)
layer.shpType = 'polygon'
layer.name = 'root'
layer._defBbox()
print("Done")
return layer
def createGrid(nRows, nCols, lowerLeft=None, upperRight=None):
"""Creates a new Layer with a regular lattice
:param nRows: number of rows
:type nRows: integer
:param nCols: number of columns
:type nCols: integer
:type lowerLeft: tuple or none, lower-left corner coordinates; default is (0,0)
:type upperRight: tuple or none, upper-right corner coordinates; default is (100,100)
:rtype: Layer new lattice
**Description**
Regular lattices are widely used in both theoretical and empirical
applications in Regional Science. The example below shows how easy
the creation of this kind of maps is using clusterPy.
**Examples**
Create a grid of ten by ten points.::
import clusterpy
points = clusterpy.createGrid(10,10)
Create a grid of ten by ten points on the bounding box (0,0,100,100).::
import clusterpy
points = clusterpy.createGrid(10, 10, lowerLeft=(0, 0), upperRight=(100, 100))
"""
print("Creating grid")
if lowerLeft != None and upperRight != None:
ymin = lowerLeft[1]
ymax = upperRight[1]
xmin = lowerLeft[0]
xmax = upperRight[0]
areaHeight = float(ymax - ymin) / nRows
areaWidth = float(xmax - xmin) / nCols
else:
ymin = 0
xmin = 0
xmax = 10*nCols
ymax = 10*nRows
areaHeight = 10
areaWidth = 10
nyPoints = nRows
nxPoints = nCols
N = nyPoints*nxPoints
Y = {}
acty = ymax
actx = xmin
map = []
wr = {}
wq = {}
    # Creating the wr matrix with the rook (tower) criterion
disAreas = [0, nxPoints - 1, (N-nxPoints), N - 1]
wr[0] = [1, nyPoints]
wr[nxPoints - 1] = [nxPoints - 2, 2 * nxPoints - 1]
wr[N - nxPoints] = [N - nxPoints - nxPoints, N - nxPoints + 1]
wr[N - 1] = [N - 2, N - 1 - nxPoints]
wq[0] = [1, nxPoints, nxPoints + 1]
wq[nxPoints - 1] = [nxPoints - 2, nxPoints + nxPoints - 1,
nxPoints + nxPoints - 2]
wq[N - nxPoints] = [N - nxPoints - nxPoints, N - nxPoints + 1,
N - nxPoints - nxPoints + 1]
wq[N - 1] = [N - 2, N - 1 - nxPoints, N - 1 - nxPoints - 1]
verticalBorderAreas = []
    for i in range(1, nxPoints - 1):  # Assigning the neighborhood of the left and right border areas
wr[i * nxPoints] = [i * nxPoints - nxPoints, i * nxPoints + 1,
i * nxPoints + nxPoints]
wr[nxPoints * i + nxPoints - 1] = [nxPoints * i - 1,
nxPoints * i + nxPoints - 2, nxPoints * i + 2 * nxPoints - 1]
wq[i * nxPoints] = [i * nxPoints - nxPoints, i * nxPoints - nxPoints + 1,
i * nxPoints + 1,i * nxPoints + nxPoints, i * nxPoints + nxPoints + 1]
wq[nxPoints * i + nxPoints - 1] = [nxPoints * i - 1, nxPoints * i - 2,
nxPoints * i + nxPoints - 2, nxPoints * i + 2 * nxPoints - 1,
nxPoints * i + 2 * nxPoints - 2]
disAreas = disAreas + [i * nxPoints, nxPoints * i + nxPoints - 1]
disAreas = disAreas + list(range(1, nxPoints - 1)) + list(range((N - nxPoints) + 1, N - 1))
    for i in range(1, nxPoints - 1):  # Assigning the neighborhood of the top and bottom border areas
wr[i]=[i - 1, i + nxPoints, i + 1]
wq[i]=[i - 1, i + nxPoints - 1, i + nxPoints, i + nxPoints + 1, i + 1]
for i in range((N - nxPoints) + 1, N - 1):
wr[i]=[i - 1, i - nxPoints, i + 1]
wq[i]=[i - 1, i - nxPoints - 1, i - nxPoints, i - nxPoints + 1, i + 1]
cont = 0
    for i in range(nyPoints):  # Creating the clusterPy areas
nexty = acty - areaHeight
for j in range(nxPoints):
nextx = actx + areaWidth
x1 = tuple([actx, acty])
x2 = tuple([nextx, acty])
x3 = tuple([nextx, nexty])
x4 = tuple([actx, nexty])
x5 = tuple([actx, acty])
area = [x1, x2, x3, x4, x5]
map.append([area])
actx = nextx
            if cont not in disAreas:  # Assigning the rest of the neighborhoods
wr[cont]=[cont - 1, cont - nxPoints, cont + 1, cont + nxPoints]
wq[cont]=[cont - 1, cont - nxPoints - 1, cont - nxPoints,
cont - nxPoints + 1, cont + 1, cont + nxPoints - 1,
cont + nxPoints, cont + nxPoints + 1]
cont = cont + 1
acty = nexty
actx = xmin
for i in range(N):
Y[i]=[i]
layer = Layer()
layer.Y = Y
layer.fieldNames = ['ID']
layer.areas = map
layer.Wrook = wr
layer.Wqueen = wq
layer.Wqueen, layer.Wrook, = weightsFromAreas(layer.areas)
layer.shpType = 'polygon'
layer.name = 'root'
layer._defBbox()
print("Done")
return layer
def importShape(shapefile):
"""Reads the geographic information stored in a shape file and returns
them in python objects.
:param shapefile: path to shapefile including the extension ".shp"
:type shapefile: string
:rtype: tuple (coordinates(List), Wqueen(Dict), Wrook(Dict)).
**Example** ::
import clusterpy
chinaAreas = clusterpy.importShape("clusterpy/data_examples/china.shp")
"""
INFO, areas = readShape(shapefile)
if INFO['type'] == 5:
Wqueen, Wrook = weightsFromAreas(areas)
shpType = 'polygon'
elif INFO['type'] == 3:
shpType = 'line'
Wrook = {}
Wqueen = {}
elif INFO['type'] == 1:
shpType = 'point'
Wrook = {}
Wqueen = {}
return areas, Wqueen, Wrook, shpType
def readShape(filename):
""" This function automatically detects the type of the shape and then reads an ESRI shapefile of polygons, polylines or points.
:param filename: name of the file to be read
:type filename: string
:rtype: tuple (information about the layer and areas coordinates).
"""
fileObj=open(filename, 'rb')
fileObj.seek(32, 1)
shtype = struct.unpack('<i', fileObj.read(4))[0]
if shtype == 1: # Points
INFO, areas = readPoints(fileObj)
elif shtype == 3: #PolyLine
INFO, areas = readPolylines(fileObj)
elif shtype == 5: #Polygon
INFO, areas = readPolygons(fileObj)
fileObj.close()
return INFO, areas
def readPoints(bodyBytes):
"""This function reads an ESRI shapefile of points.
:param bodyBytes: bytes to be processed
:type bodyBytes: string
:rtype: tuple (information about the layer and area coordinates).
"""
INFO = {}
INFO['type'] = 1
AREAS = []
id = 0
bb0 = struct.unpack('>d', bodyBytes.read(8))[0]
bb1 = struct.unpack('>d', bodyBytes.read(8))[0]
bb2 = struct.unpack('>d', bodyBytes.read(8))[0]
bb3 = struct.unpack('>d', bodyBytes.read(8))[0]
bb4 = struct.unpack('>d', bodyBytes.read(8))[0]
bb5 = struct.unpack('>d', bodyBytes.read(8))[0]
bb6 = struct.unpack('>d', bodyBytes.read(8))[0]
bb7 = struct.unpack('>d', bodyBytes.read(8))[0]
    while bodyBytes.read(1) != b"":
bodyBytes.seek(11, 1)
x = struct.unpack('<d', bodyBytes.read(8))[0]
y = struct.unpack('<d', bodyBytes.read(8))[0]
area = [x, y]
AREAS = AREAS + [[[tuple(area)]]]
return INFO, AREAS
def readPolylines(bodyBytes):
"""This function reads a ESRI shape file of lines.
:param bodyBytes: bytes to be processed
:type bodyBytes: string
:rtype: tuple (information about the layer and areas coordinates).
"""
INFO = {}
INFO['type'] = 3
AREAS=[]
id = 0
pos = 100
bb0 = struct.unpack('>d', bodyBytes.read(8))[0]
bb1 = struct.unpack('>d', bodyBytes.read(8))[0]
bb2 = struct.unpack('>d', bodyBytes.read(8))[0]
bb3 = struct.unpack('>d', bodyBytes.read(8))[0]
bb4 = struct.unpack('>d', bodyBytes.read(8))[0]
bb5 = struct.unpack('>d', bodyBytes.read(8))[0]
bb6 = struct.unpack('>d', bodyBytes.read(8))[0]
bb7 = struct.unpack('>d', bodyBytes.read(8))[0]
    while bodyBytes.read(1) != b"":
bodyBytes.seek(7, 1)
bodyBytes.seek(36, 1)
nParts = struct.unpack('<i', bodyBytes.read(4))[0]
nPoints = struct.unpack('<i', bodyBytes.read(4))[0]
r = 1
parts = []
while r <= nParts:
parts += [struct.unpack('<i', bodyBytes.read(4))[0]]
r += 1
ring = []
area = []
l = 0
while l < nPoints:
if l in parts[1:]:
area += [ring]
ring = []
x = struct.unpack('<d', bodyBytes.read(8))[0]
y = struct.unpack('<d', bodyBytes.read(8))[0]
l += 1
ring = ring + [(x, y)]
area += [ring]
AREAS = AREAS + [area]
id += 1
return INFO, AREAS
def readPolygons(bodyBytes):
"""This function reads an ESRI shape file of polygons.
:param bodyBytes: bytes to be processed
:type bodyBytes: string
:rtype: tuple (information about the layer and areas coordinates).
"""
INFO = {}
INFO['type'] = 5
AREAS = []
id = 0
pos = 100
parts = []
bb0 = struct.unpack('>d', bodyBytes.read(8))[0]
bb1 = struct.unpack('>d', bodyBytes.read(8))[0]
bb2 = struct.unpack('>d', bodyBytes.read(8))[0]
bb3 = struct.unpack('>d', bodyBytes.read(8))[0]
bb4 = struct.unpack('>d', bodyBytes.read(8))[0]
bb5 = struct.unpack('>d', bodyBytes.read(8))[0]
bb6 = struct.unpack('>d', bodyBytes.read(8))[0]
bb7 = struct.unpack('>d', bodyBytes.read(8))[0]
    while bodyBytes.read(1) != b"":  # 100 bytes for header
area = []
bodyBytes.seek(7, 1)
bodyBytes.seek(36, 1)
numParts = struct.unpack('<i', bodyBytes.read(4))[0]
numPoints = struct.unpack('<i', bodyBytes.read(4))[0]
parts = []
for i in range(numParts):
parts += [struct.unpack('<i', bodyBytes.read(4))[0]]
ring = []
for i in range(numPoints):
if i in parts and i != 0:
area.append(ring)
ring = []
x = struct.unpack('<d', bodyBytes.read(8))[0]
y = struct.unpack('<d', bodyBytes.read(8))[0]
ring += [(x, y)]
else:
x = struct.unpack('<d', bodyBytes.read(8))[0]
y = struct.unpack('<d', bodyBytes.read(8))[0]
ring += [(x, y)]
area.append(ring)
AREAS.append(area)
return INFO, AREAS
def importDBF(filename):
"""Get variables from a dbf file.
:param filename: name of the file (String) including ".dbf"
:type filename: string
:rtype: tuple (dbf file Data, fieldNames and fieldSpecs).
**Example** ::
import clusterpy
chinaData = clusterpy.importDBF("clusterpy/data_examples/china.dbf")
"""
Y = {}
fieldNames = []
fieldSpecs = []
fileBytes = open(filename, 'rb')
fileBytes.seek(4, 1)
numberOfRecords = struct.unpack('i', fileBytes.read(4))[0]
firstDataRecord = struct.unpack('h', fileBytes.read(2))[0]
lenDataRecord = struct.unpack('h', fileBytes.read(2))[0]
fileBytes.seek(20, 1)
while fileBytes.tell() < firstDataRecord - 1:
name = ''.join(struct.unpack(11 * 'c', fileBytes.read(11))).replace("\x00", "")
typ = ''.join(struct.unpack('c', fileBytes.read(1)))
fileBytes.seek(4, 1)
siz = struct.unpack('B', fileBytes.read(1))[0]
dec = struct.unpack('B', fileBytes.read(1))[0]
spec = (typ, siz, dec)
fieldNames += [name]
fieldSpecs += [spec]
fileBytes.seek(14, 1)
fileBytes.seek(1, 1)
Y = {}
for nrec in range(numberOfRecords):
record = fileBytes.read(lenDataRecord)
start = 0
first = 0
Y[nrec] = []
for nf, field in enumerate(fieldSpecs):
l = field[1] + 1
dec = field[2]
end = start + l + first
value = record[start: end]
while value.find(" ") != -1:
value = value.replace(" ", " ")
if value.startswith(" "):
value = value[1:]
if value.endswith(" "):
value = value[:-1]
if field[0] in ["N", "F", "B", "I", "O"]:
if dec == 0:
value = int(float(value))
else:
value = float(value)
start = end
first = -1
Y[nrec] += [value]
return (Y, fieldNames, fieldSpecs)
def importCSV(filename,header=True,delimiter=","):
"""Get variables from a csv file.
:param filename: name of the file (String)
:type filename: string
:param header: Boolean, which is True if the csv have headers.
:type header: Boolean or None
:rtype: tuple (csv file Data, fieldnames).
**Example** ::
import clusterpy
chinaData = clusterpy.importCSV("clusterpy/data_examples/china.csv")
"""
f = open(filename)
fields = [c[:-1].strip().rsplit(delimiter) for c in f.readlines()]
f.close()
if fields[-1][0] == "":
fields = fields[:-1]
nc = len(fields[0])
Y = {}
if header:
fieldnames = fields[0]
for i, c in enumerate(fields[1:]):
appY = []
for x in c:
try:
appY.append(float(x))
except:
appY.append(x)
Y[i] = appY
else:
fieldnames = ['VAR' + str(i) for i in range(nc)]
for i, c in enumerate(fields):
appY = []
for x in c:
try:
appY.append(float(x))
except:
appY.append(x)
Y[i] = appY
return (Y, fieldnames)
def importGWT(filename,initialId=1):
"""Get the a neighborhood structure from a GWT file.
:param filename: name of the file (String)
:type filename: string
:param initialId: First id of the areas.
:type initialId: integer
:rtype: contiguity dictionary.
**Example 1** Storing a GWT neighborhood structure into a layer
object::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.customW = clusterpy.importGWT("clusterpy/data_examples/china_gwt_658.193052.gwt")
"""
finp = open(filename)
finp.readline()
w = {}
    reg = re.compile(r"(\d+)\s(\d+)\s+(\d+\.\d*)")
for line in finp:
items = reg.match(line.strip())
if items:
id = int(items.group(1))
neigh = int(items.group(2))
if id - initialId in w:
w[id - initialId].append(neigh - initialId)
else:
w[id - initialId] = [neigh - initialId]
else:
raise NameError("File structure is not from a GWT file")
return w
|
"""Web Routes."""
from masonite.routes import Get, Post, Put, Delete, RouteGroup
ROUTES = [
Get("/", "WelcomeController@show").name("welcome"),
RouteGroup([
Get("/","LocationController@index").name("index"),
Get("/@id", "LocationController@show").name("show"),
Post("/", "LocationController@create").name("create"),
Put("/@id", "LocationController@update").name("update"),
Delete("/@id", "LocationController@destroy").name("destroy"),
],prefix="/locations", name="locations"),
RouteGroup([
Post("/login","AuthController@login").name("login"),
Post("/signup","AuthController@signup").name("signup"),
Post("logout","AuthController@logout").name("logout"),
Get("/all","AuthController@users").name("all"),
],prefix="/auth", name="auth"),
Post("/login","AuthController@login").name("login"),
RouteGroup([
Post("/","LocationController@create_post").name("create"),
Get("/","LocationController@all_posts").name("all"),
Get("/@id","LocationController@show_post").name("show"),
Put("/@id", "LocationController@update_post").name("update"),
Delete("/@id", "LocationController@destroy").name("destroy"),
],prefix="/posts",name="posts")
]
|
import os
import requests
import logging
import json
from django.shortcuts import redirect
from social_core.utils import handle_http_errors
from social_core.exceptions import AuthFailed
from social_core.backends.oauth import BaseOAuth2
class Auth0(BaseOAuth2):
"""Auth0 OAuth authentication backend"""
name = 'auth0'
SCOPE_SEPARATOR = ' '
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [
('picture', 'picture')
]
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
try:
self.process_error(self.data)
state = self.validate_state()
response = self.request_access_token(
self.access_token_url(),
data=self.auth_complete_params(state),
headers=self.auth_headers(),
auth=self.auth_complete_credentials(),
method=self.ACCESS_TOKEN_METHOD
)
self.process_error(response)
details = self.get_user_details(response)
user_dict = json.loads(details["user"])
user_type = user_dict["userType"]
if user_type != 'Producer':
data = {"error":"denied"}
raise AuthFailed(self, data)
except AuthFailed:
            if os.getenv('AUTH0_REDIRECT_URL'):
return redirect('http://producer-toolkit.eu/#/login')
else:
return
return self.do_auth(response['access_token'], response=response,
*args, **kwargs)
# Silent authentication
# Check if user if already logged in via SSO
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
params = {
'client_id': client_id,
'redirect_uri': self.get_redirect_uri(state),
'prompt': 'none'
}
if self.STATE_PARAMETER and state:
params['state'] = state
if self.RESPONSE_TYPE:
params['response_type'] = self.RESPONSE_TYPE
return params
def authorization_url(self):
"""Return the authorization endpoint."""
return "https://" + self.setting('DOMAIN') + "/authorize"
def access_token_url(self):
"""Return the token endpoint"""
return "https://" + self.setting('DOMAIN') + "/oauth/token"
def get_user_id(self, details, response):
"""Return current user id."""
return details['user_id']
def get_user_details(self, response):
url = 'https://' + self.setting('DOMAIN') + '/userinfo'
headers = {'authorization': 'Bearer ' + response['access_token']}
resp = requests.get(url, headers=headers)
userinfo = resp.json()
return {'username': userinfo['nickname'],
'first_name': userinfo['name'],
'picture': userinfo['picture'],
'user_id': userinfo['sub'],
'user': userinfo['https://producer.eu/user_metadata']['user']}
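# --- Hedged configuration sketch (not part of this module) ---
# Assuming the standard python-social-auth settings naming for a backend whose
# name attribute is 'auth0', a Django settings.py would wire this backend up
# roughly as below. The module path and environment variable names are
# illustrative assumptions, not taken from this project.
#
# AUTHENTICATION_BACKENDS = [
#     'path.to.this.module.Auth0',
#     'django.contrib.auth.backends.ModelBackend',
# ]
# SOCIAL_AUTH_AUTH0_DOMAIN = os.getenv('AUTH0_DOMAIN')
# SOCIAL_AUTH_AUTH0_KEY = os.getenv('AUTH0_CLIENT_ID')
# SOCIAL_AUTH_AUTH0_SECRET = os.getenv('AUTH0_CLIENT_SECRET')
# SOCIAL_AUTH_AUTH0_SCOPE = ['openid', 'profile', 'email']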
|
#import pandas as pd
#import pandas_ml as pdml
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
predicted = np.genfromtxt ('res/predicted1.txt', delimiter=",")
expected = np.genfromtxt ('res/expected1.txt', delimiter=",")
y_true = expected[0:311029]
y_pred = predicted[0:311029]
#print(np.count_nonzero(y_true <> 2))
cm = confusion_matrix(y_true, y_pred)
y_true1 = np.array(y_true)
y_pred1 = np.array(y_pred)
print(accuracy_score(y_true1, y_pred1))
#print(cm)
target_names = ['class 0', 'class 1', 'class 2','class3','class4']
#print("**************Built-In****************************")
#print(classification_report(y_true, y_pred, target_names=target_names))
#print("*************************************************")
cl = 0;
for num in range(0,5):
new_y_true = y_true.astype(int)
new_y_pred = y_pred.astype(int)
#Convert one class as 1 and rest of the classes as 0
ipresent= np.where(new_y_true == cl)[0]
new_y_true[ipresent] = 9
    inot = np.where(new_y_true != 9)[0]
new_y_true[inot] = 0
new_y_true[ipresent] = 1
ipresent= np.where(new_y_pred == cl)[0]
new_y_pred[ipresent] = 9
    inot = np.where(new_y_pred != 9)[0]
new_y_pred[inot] = 0
new_y_pred[ipresent] = 1
print ("---------------------------------------------------------\n"+ "CLASS " + str(cl) )
#print( np.count_nonzero(new_y_true))
#print( np.count_nonzero(new_y_pred))
tn, fp, fn, tp = confusion_matrix(new_y_true, new_y_pred).ravel()
#print(" TP:\t" + str(tp) + "\nTN:\t" + str(tn) + "\nFP:\t" + str(fp) + "\nFN:\t" + str(fn))
# Measures are calculated according to https://en.wikipedia.org/wiki/Confusion_matrix,
# If further measures are given in above page, can be extended easily.
# Sensitivity, Recall, hit rate, or true positive rate (TPR) = TP/(TP+FN)
tp = tp.astype(float)
tn = tn.astype(float)
fp = fp.astype(float)
    fn = fn.astype(float)
# tpr
tpr = tp/(tp+fn)
# Precision = TP/(TP+FP)
#prec = tp/(tp+fp)
# False Positive Rate (FPR) = FP/(FP+TN)
fpr = fp/(fp+tn)
# Accuracy = (TP+TN)/(TP+FP+TN+FN)
acc = (tp+tn)/(tp+fp+tn+fn)
# recall
#rec = tp/(tp+fn)
print("Accuracy = \t" + str(acc) + "\nFPR= \t "+ str(fpr) + "\nTPR= \t" + str(tpr))
cl = cl + 1
|
from quizard_backend.models import QuizAttempt
from quizard_backend.tests import (
profile_created_from_origin,
get_fake_quiz,
get_fake_quiz_questions,
get_fake_user,
get_access_token_for_user,
)
async def test_get_own_created_and_attempted_quizzes(
app, client, users, questions, quizzes
):
# Create a fresh user
new_user = get_fake_user()
new_user.pop("id")
res = await client.post("/users", json=new_user)
assert res.status == 200
body = await res.json()
new_user_id = body["data"]["id"]
new_user_token = await get_access_token_for_user(body["data"], app)
# Create a few quizzes
created_quizzes = []
for _ in range(20):
fake_quiz = get_fake_quiz()
fake_quiz.pop("creator_id")
new_quiz = {**fake_quiz, "questions": get_fake_quiz_questions(has_id=False)}
new_quiz.pop("id", None)
        # Cannot create a quiz without token
res = await client.post("/quizzes", json=new_quiz)
assert res.status == 401
# Create a quiz with valid args
res = await client.post(
"/quizzes", json=new_quiz, headers={"Authorization": new_user_token}
)
assert res.status == 200
body = await res.json()
created_quizzes.append(body["data"])
# Attempt to do a few quizzes as well
attempted_quizzes = []
for quiz_index in range(1, 17):
question_index = 3
selected_option_3 = 1
res = await client.post(
"/quizzes/{}/questions/{}/answers".format(
quizzes[quiz_index]["id"], questions[quiz_index][question_index]["id"]
),
json={"selected_option": selected_option_3},
headers={"Authorization": new_user_token},
)
assert res.status == 200
body = await res.json()
attempted_quizzes.append(quizzes[quiz_index])
# Check if the attempted and created quizzes are correct
res = await client.get("/users/{}/quizzes/created".format(new_user_id))
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
# Check if the created quizzes are correct
for created, retrieve in zip(created_quizzes, body["data"]):
assert profile_created_from_origin(
retrieve, created, ignore={"questions", "updated_at"}
)
# Check if the attempted quizzes are correct
res = await client.get("/users/{}/quizzes/attempted".format(new_user_id))
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
for expected_quiz, actual_quiz in zip(attempted_quizzes[::-1], body["data"]):
assert profile_created_from_origin(
{**expected_quiz, "is_finished": False},
actual_quiz,
ignore={"questions", "updated_at"},
)
async def test_pagination_created_attempted_quizzes(
app, client, users, questions, quizzes, token_user
):
# Create a fresh user, as the created user already has some previously created quizzes
new_user = get_fake_user()
new_user.pop("id")
res = await client.post("/users", json=new_user)
assert res.status == 200
body = await res.json()
new_user_id = body["data"]["id"]
new_user_token = await get_access_token_for_user(body["data"], app)
# Create a few quizzes
created_quizzes = []
for _ in range(35):
fake_quiz = get_fake_quiz()
fake_quiz.pop("creator_id")
new_quiz = {**fake_quiz, "questions": get_fake_quiz_questions(has_id=False)}
new_quiz.pop("id", None)
# Create a quiz with valid args
res = await client.post(
"/quizzes", json=new_quiz, headers={"Authorization": new_user_token}
)
assert res.status == 200
body = await res.json()
created_quizzes.append(body["data"])
# Check pagination
res = await client.get("/users/{}/quizzes/created".format(new_user_id))
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
for created, retrieve in zip(created_quizzes[:15], body["data"]):
assert profile_created_from_origin(
retrieve, created, ignore={"questions", "updated_at"}
)
# Check second page
next_page_link = body["links"]["next"]
# Strip the host, as it is a testing host
next_page_link = "/" + "/".join(next_page_link.split("/")[3:])
res = await client.get(next_page_link)
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
for created, retrieve in zip(created_quizzes[15:30], body["data"]):
assert profile_created_from_origin(
retrieve, created, ignore={"questions", "updated_at"}
)
# Check last page
next_page_link = body["links"]["next"]
# Strip the host, as it is a testing host
next_page_link = "/" + "/".join(next_page_link.split("/")[3:])
res = await client.get(next_page_link)
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 5
for created, retrieve in zip(created_quizzes[30:], body["data"]):
assert profile_created_from_origin(
retrieve, created, ignore={"questions", "updated_at"}
)
## ATTEMPTED
# Attempt to do a few quizzes as well
attempt_user_id = users[0]["id"]
attempted_quizzes = created_quizzes[::-1]
for quiz in created_quizzes:
question_index = 5
selected_option = 3
res = await client.post(
"/quizzes/{}/questions/{}/answers".format(
quiz["id"], quiz["questions"][question_index]
),
json={"selected_option": selected_option},
headers={"Authorization": token_user},
)
assert res.status == 200
body = await res.json()
# Check pagination
res = await client.get("/users/{}/quizzes/attempted".format(attempt_user_id))
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
assert "links" in body
assert "next" in body["links"]
for created, retrieve in zip(attempted_quizzes[:15], body["data"]):
assert profile_created_from_origin(
retrieve,
{**created, "num_attempts": 1, "is_finished": False},
ignore={"questions", "updated_at"},
)
# Check second page
next_page_link = body["links"]["next"]
# Strip the host, as it is a testing host
next_page_link = "/" + "/".join(next_page_link.split("/")[3:])
res = await client.get(next_page_link)
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 15
for created, retrieve in zip(attempted_quizzes[15:30], body["data"]):
assert profile_created_from_origin(
retrieve,
{**created, "num_attempts": 1, "is_finished": False},
ignore={"questions", "updated_at"},
)
# Check last page
next_page_link = body["links"]["next"]
# Strip the host, as it is a testing host
next_page_link = "/" + "/".join(next_page_link.split("/")[3:])
res = await client.get(next_page_link)
assert res.status == 200
body = await res.json()
assert "data" in body
assert isinstance(body["data"], list)
assert len(body["data"]) == 5
for created, retrieve in zip(attempted_quizzes[30:], body["data"]):
assert profile_created_from_origin(
retrieve,
{**created, "num_attempts": 1, "is_finished": False},
ignore={"questions", "updated_at"},
)
|
"""
experiments.py
Run experiments with multiple configurations.
Create a driver config specifying the experiments under configs/other.
Run as: python3 -W ignore experiments.py -b region.yaml -d driver.yaml
"""
import argparse
import copy
import datetime
import itertools
import json
import logging
import multiprocessing
import os
import pickle
import sys
import matplotlib.pyplot as plt
import numpy as np
import yaml
from joblib import Parallel, delayed
from tqdm import tqdm
sys.path.append('../../')
from main.ihme.main import single_fitting_cycle
from utils.fitting.util import update_dict, chunked, CustomEncoder
from utils.generic.config import read_config, process_config_ihme, make_date_str, generate_configs_from_driver
default_loss_methods = ['mape', 'rmse', 'rmse_log']
def create_output(predictions_dict, output_folder, tag):
"""Custom output generation function"""
directory = f'{output_folder}/{tag}'
if not os.path.exists(directory):
os.makedirs(directory)
d = {}
# Numpy
for key in ['best_init', 'best_params', 'draws']:
np.save(f'{directory}/{key}.npy', predictions_dict[key])
# Pickle
for key in ['trials', 'run_params', 'plots', 'smoothing_description']:
with open(f'{directory}/{key}.pkl', 'wb') as f:
pickle.dump(predictions_dict[key], f)
# Dataframes
for key in ['df_prediction', 'df_district', 'df_train', 'df_val', 'df_loss', 'df_loss_pointwise',
'df_district_unsmoothed', 'df_train_nora_notrans', 'df_val_nora_notrans', 'df_test_nora_notrans']:
if predictions_dict[key] is not None:
predictions_dict[key].to_csv(f'{directory}/{key}.csv')
# JSON
d['data_last_date'] = predictions_dict['data_last_date']
with open(f'{directory}/other.json', 'w') as f:
json.dump(d, f, indent=4)
with open(f'{directory}/config.json', 'w') as f:
json.dump(make_date_str(predictions_dict['config']), f, indent=4, cls=CustomEncoder)
with open(f'{directory}/config.yaml', 'w') as f:
yaml.dump(make_date_str(predictions_dict['config']), f)
def get_experiment(driver_config_filename):
"""Get experiment configuration"""
logging.info('Getting experiment choices')
configs = generate_configs_from_driver(driver_config_filename)
return configs
def run(config):
"""Run single experiment for given config"""
predictions_dict = single_fitting_cycle(**copy.deepcopy(config['fitting']))
predictions_dict['fitting_date'] = datetime.datetime.now().strftime("%Y-%m-%d")
return predictions_dict
def run_parallel(key, params):
"""Read config and run corresponding experiment"""
config = read_config(f'{key}.yaml', preprocess=False, config_dir='ihme')
config = update_dict(config, params)
config_copy = copy.deepcopy(config)
config = process_config_ihme(config)
try:
logging.info(f'Start run: {key}')
x = run(config)
x['config'] = config_copy
plt.close('all')
except Exception as e:
x = e
logging.error(e)
return x
def perform_batch_runs(driver_config_filename='list_of_exp.yaml', output_folder=None):
"""Run all experiments"""
    # Folder where the experiment outputs will be saved
timestamp = datetime.datetime.now().strftime("%Y_%m%d_%H%M%S")
if output_folder is None:
output_folder = f'../../outputs/ihme/{timestamp}'
os.makedirs(output_folder, exist_ok=True)
n_jobs = multiprocessing.cpu_count()
# Get generator of partial configs corresponding to experiments
what_to_vary = get_experiment(driver_config_filename)
# Run experiments
logging.info('Start batch runs')
for i, chunk in enumerate(chunked(what_to_vary, n_jobs)):
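        # Each chunk may be a one-shot iterator, so tee it: one copy drives the parallel
        # runs, the other pairs the results back with their experiment keys when saving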
chunk1, chunk2 = itertools.tee(chunk, 2)
print(f'Group {i}')
predictions_arr = Parallel(n_jobs=n_jobs)(
delayed(run_parallel)(key, config) for key, config in tqdm(chunk1))
# Save results
for j, (key, _) in tqdm(enumerate(chunk2)):
if isinstance(predictions_arr[j], dict):
create_output(predictions_arr[j], output_folder, f'{key}/{n_jobs * i + j}')
# with open(f'{output_folder}/{key}_predictions_dict.pkl', 'wb') as f:
# pickle.dump(predictions_arr[j], f)
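# For reference only: `chunked` above is imported from utils.fitting.util. A minimal
# equivalent (an assumption, not the project's actual implementation) lazily yields
# successive batches of at most `size` items from an iterable:
def _chunked_sketch(iterable, size):
    iterator = iter(iterable)
    while True:
        batch = list(itertools.islice(iterator, size))
        if not batch:
            return
        yield batch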
if __name__ == '__main__':
logging.basicConfig(level=logging.ERROR)
parser = argparse.ArgumentParser(description="IHME Batch Running Script")
parser.add_argument("-d", "--driver_config", type=str, required=True,
help="driver config used for multiple experiments")
parsed_args = parser.parse_args()
perform_batch_runs(parsed_args.driver_config)
|
import os
import unittest
from erclient.list import communication_types_list
class TestErClient(unittest.TestCase):
def test_er_client_id_exists(self):
er_client_id = os.environ.get('ER_CLIENT_ID')
self.assertIsNotNone(er_client_id, "ER_CLIENT_ID Environment var is missing")
def test_er_client_secret_exists(self):
er_client_secret = os.environ.get('ER_CLIENT_SECRET')
self.assertIsNotNone(er_client_secret, "ER_CLIENT_SECRET Environment var is missing")
    def test_er_token_url_exists(self):
        er_token_url = os.environ.get('ER_TOKEN_URL')
        self.assertIsNotNone(er_token_url, "ER_TOKEN_URL Environment var is missing")
    def test_er_base_url_exists(self):
        er_base_url = os.environ.get('ER_BASE_URL')
        self.assertIsNotNone(er_base_url, "ER_BASE_URL Environment var is missing")
def test_connects_to_er_and_returns_data(self):
self.assertIsInstance(communication_types_list(), list)
if __name__ == '__main__':
unittest.main()
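# Illustrative note (not part of the original suite): the live-API check above could be
# guarded so it is skipped instead of failing when credentials are absent, e.g. by
# decorating it with:
#   @unittest.skipUnless(os.environ.get('ER_CLIENT_ID'), 'ER credentials not configured')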
|