repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k)
---|---|---|---|---|
UKPLab/cdcr-beyond-corpus-tailored | python/handwritten_baseline/pipeline/model/feature_extr/debug.py | 52bf98692c7464f25628baea24addd1a988f9a1f | import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
from overrides import overrides
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
class DebugFeatureExtractor(FeatureExtractorMixin):
"""
Returns constant or random feature value for testing purposes.
"""
def __init__(self,
strategy: str,
num_features: int,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
self.strategy = strategy
self.num_features = num_features
@overrides
def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
if self.strategy == "random":
return np.random.normal(0, 1, (len(pairs), self.num_features))
elif self.strategy == "zero":
return np.zeros((len(pairs), self.num_features))
elif self.strategy == "mix":
num_zero_features = self.num_features // 2
print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
zero_features = np.zeros((len(pairs), num_zero_features))
random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(np.transpose(feature_matrix))
return feature_matrix
@overrides
def _get_plain_names_of_all_features(self) -> List[str]:
return [str(i) for i in range(self.num_features)]
@classmethod
@overrides
def from_params(cls, config: Dict):
strategy = config.pop("strategy")
num_features = config.pop("num_features")
use_cache = config.pop("use_cache", False)
features_to_select = config.pop("features_to_select", None)
obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
if config:
raise ValueError("Leftover configuration: " + pprint.pformat(config))
return obj | [((36, 29, 36, 72), 'numpy.hstack', 'np.hstack', ({(36, 39, 36, 71): '[zero_features, random_features]'}, {}), '([zero_features, random_features])', True, 'import numpy as np\n'), ((54, 58, 54, 80), 'pprint.pformat', 'pprint.pformat', ({(54, 73, 54, 79): 'config'}, {}), '(config)', False, 'import pprint\n'), ((37, 30, 37, 58), 'numpy.transpose', 'np.transpose', ({(37, 43, 37, 57): 'feature_matrix'}, {}), '(feature_matrix)', True, 'import numpy as np\n')] |
cyberixae/kunquat | kunquat/tracker/errorbase.py | 06ae72b2c1519686cc510ce887d9d45a5c3fa3a3 | # -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from __future__ import print_function
import sys
import traceback
import os
_ERROR_BRIEF = 'Kunquat Tracker encountered an error.'
_SUBMIT_INFO = \
'''Please submit an issue to Kunquat issue tracker at
https://github.com/kunquat/kunquat/issues with the following
information attached.'''
def get_error_details(eclass, einst, trace):
details_list = traceback.format_exception(eclass, einst, trace)
return ''.join(details_list)
def print_error_msg(eclass, einst, trace):
details = get_error_details(eclass, einst, trace)
print('\n{}\n{}\n\n{}'.format(_ERROR_BRIEF, _SUBMIT_INFO, details),
file=sys.stderr)
def log_error(eclass, einst, trace):
pass # TODO: implement once we decide where to write
def setup_basic_error_handler():
sys.excepthook = _basic_handler
def _basic_handler(eclass, einst, trace):
print_error_msg(eclass, einst, trace)
log_error(eclass, einst, trace)
os.abort()
| [((28, 19, 28, 67), 'traceback.format_exception', 'traceback.format_exception', ({(28, 46, 28, 52): 'eclass', (28, 54, 28, 59): 'einst', (28, 61, 28, 66): 'trace'}, {}), '(eclass, einst, trace)', False, 'import traceback\n'), ((49, 4, 49, 14), 'os.abort', 'os.abort', ({}, {}), '()', False, 'import os\n')] |
dtklinh/Protein-Rigid-Domains-Estimation | venv/lib/python3.5/site-packages/igraph/test/atlas.py | a27152ef5437eb87ee31c317091356c4787f82a4 |
import warnings
import unittest
from igraph import *
class TestBase(unittest.TestCase):
def testPageRank(self):
for idx, g in enumerate(self.__class__.graphs):
try:
pr = g.pagerank()
except Exception as ex:
self.assertTrue(False, msg="PageRank calculation threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], pr)
continue
self.assertAlmostEqual(1.0, sum(pr), places=5, \
msg="PageRank sum is not 1.0 for graph #%d (%r)" % (idx, pr))
self.assertTrue(min(pr) >= 0, \
msg="Minimum PageRank is less than 0 for graph #%d (%r)" % (idx, pr))
def testEigenvectorCentrality(self):
# Temporarily turn off the warning handler because g.evcent() will print
# a warning for DAGs
warnings.simplefilter("ignore")
try:
for idx, g in enumerate(self.__class__.graphs):
try:
ec, eval = g.evcent(return_eigenvalue=True)
except Exception as ex:
self.assertTrue(False, msg="Eigenvector centrality threw exception for graph #%d: %s" % (idx, ex))
raise
if g.vcount() == 0:
self.assertEqual([], ec)
continue
if not g.is_connected():
# Skip disconnected graphs; this will be fixed in igraph 0.7
continue
n = g.vcount()
if abs(eval) < 1e-4:
self.assertTrue(min(ec) >= -1e-10,
msg="Minimum eigenvector centrality is smaller than 0 for graph #%d" % idx)
self.assertTrue(max(ec) <= 1,
msg="Maximum eigenvector centrality is greater than 1 for graph #%d" % idx)
continue
self.assertAlmostEqual(max(ec), 1, places=7, \
msg="Maximum eigenvector centrality is %r (not 1) for graph #%d (%r)" % \
(max(ec), idx, ec))
self.assertTrue(min(ec) >= 0, \
msg="Minimum eigenvector centrality is less than 0 for graph #%d" % idx)
ec2 = [sum(ec[u.index] for u in v.predecessors()) for v in g.vs]
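                # verify the eigenvector equation: for every vertex, the sum of its
                # predecessors' centralities must equal eigenvalue * its own centrality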
for i in range(n):
self.assertAlmostEqual(ec[i] * eval, ec2[i], places=7, \
msg="Eigenvector centrality in graph #%d seems to be invalid "\
"for vertex %d" % (idx, i))
finally:
# Reset the warning handler
warnings.resetwarnings()
def testHubScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.hub_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum hub score is less than 0 for graph #%d" % idx)
def testAuthorityScore(self):
for idx, g in enumerate(self.__class__.graphs):
sc = g.authority_score()
if g.vcount() == 0:
self.assertEqual([], sc)
continue
self.assertAlmostEqual(max(sc), 1, places=7, \
msg="Maximum authority score is not 1 for graph #%d" % idx)
self.assertTrue(min(sc) >= 0, \
msg="Minimum authority score is less than 0 for graph #%d" % idx)
class GraphAtlasTests(TestBase):
graphs = [Graph.Atlas(i) for i in range(1253)]
class IsoclassTests(TestBase):
graphs = [Graph.Isoclass(3, i, directed=True) for i in range(16)] + \
[Graph.Isoclass(4, i, directed=True) for i in range(218)]
def suite():
atlas_suite = unittest.makeSuite(GraphAtlasTests)
isoclass_suite = unittest.makeSuite(IsoclassTests)
return unittest.TestSuite([atlas_suite, isoclass_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| [((101, 18, 101, 53), 'unittest.makeSuite', 'unittest.makeSuite', ({(101, 37, 101, 52): 'GraphAtlasTests'}, {}), '(GraphAtlasTests)', False, 'import unittest\n'), ((102, 21, 102, 54), 'unittest.makeSuite', 'unittest.makeSuite', ({(102, 40, 102, 53): 'IsoclassTests'}, {}), '(IsoclassTests)', False, 'import unittest\n'), ((103, 11, 103, 60), 'unittest.TestSuite', 'unittest.TestSuite', ({(103, 30, 103, 59): '[atlas_suite, isoclass_suite]'}, {}), '([atlas_suite, isoclass_suite])', False, 'import unittest\n'), ((106, 13, 106, 38), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ({}, {}), '()', False, 'import unittest\n'), ((28, 8, 28, 39), 'warnings.simplefilter', 'warnings.simplefilter', ({(28, 30, 28, 38): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((67, 12, 67, 36), 'warnings.resetwarnings', 'warnings.resetwarnings', ({}, {}), '()', False, 'import warnings\n')] |
momipsl/pycspr | pycspr/types/cl.py | 82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5 | import dataclasses
import enum
class CLType(enum.Enum):
"""Enumeration over set of CL types.
"""
BOOL = 0
I32 = 1
I64 = 2
U8 = 3
U32 = 4
U64 = 5
U128 = 6
U256 = 7
U512 = 8
UNIT = 9
STRING = 10
KEY = 11
UREF = 12
OPTION = 13
LIST = 14
BYTE_ARRAY = 15
RESULT = 16
MAP = 17
TUPLE_1 = 18
TUPLE_2 = 19
TUPLE_3 = 20
ANY = 21
PUBLIC_KEY = 22
# Set of types considered to be simple.
CL_TYPES_SIMPLE = {
CLType.BOOL,
CLType.I32,
CLType.I64,
CLType.KEY,
CLType.PUBLIC_KEY,
CLType.STRING,
CLType.U8,
CLType.U32,
CLType.U64,
CLType.U128,
CLType.U256,
CLType.U512,
CLType.UNIT,
CLType.UREF,
}
@dataclasses.dataclass
class CLTypeInfo():
"""Encapsulates CL type information associated with a value.
"""
# Associated type within CSPR type system.
typeof: CLType
@property
def type_tag(self) -> int:
"""Returns a tag used when encoding/decoding."""
return self.typeof.value
@dataclasses.dataclass
class CLTypeInfoForByteArray(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Size of associated byte array value.
size: int
@dataclasses.dataclass
class CLTypeInfoForList(CLTypeInfo):
"""Encapsulates CL type information associated with a list value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForMap(CLTypeInfo):
"""Encapsulates CL type information associated with a byte array value.
"""
# Type info of map's key.
key_type_info: CLType
# Type info of map's value.
value_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForOption(CLTypeInfo):
"""Encapsulates CL type information associated with an optional value.
"""
# Inner type within CSPR type system.
inner_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForSimple(CLTypeInfo):
"""Encapsulates CL type information associated with a simple value.
"""
pass
@dataclasses.dataclass
class CLTypeInfoForTuple1(CLTypeInfo):
"""Encapsulates CL type information associated with a 1-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple2(CLTypeInfo):
"""Encapsulates CL type information associated with a 2-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
@dataclasses.dataclass
class CLTypeInfoForTuple3(CLTypeInfo):
"""Encapsulates CL type information associated with a 3-ary tuple value value.
"""
# Type of first value within 1-ary tuple value.
t0_type_info: CLTypeInfo
# Type of first value within 2-ary tuple value.
t1_type_info: CLTypeInfo
# Type of first value within 3-ary tuple value.
t2_type_info: CLTypeInfo
@dataclasses.dataclass
class CLValue():
"""A CL value mapped from python type system.
"""
# Byte array representation of underlying data.
bytes: bytes
# Parsed pythonic representation of underlying data (for human convenience only).
parsed: object
# Type information used by a deserializer.
type_info: CLTypeInfo
| [] |
nachocano/python-aiplatform | google/cloud/aiplatform_v1/types/env_var.py | 1c6b998d9145309d79712f494a2b00b50a9a9bf4 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
class EnvVar(proto.Message):
r"""Represents an environment variable present in a Container or
Python Module.
Attributes:
name (str):
Required. Name of the environment variable.
Must be a valid C identifier.
value (str):
Required. Variables that reference a $(VAR_NAME) are
expanded using the previous defined environment variables in
the container and any service environment variables. If a
variable cannot be resolved, the reference in the input
string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped
references will never be expanded, regardless of whether the
variable exists or not.
"""
name = proto.Field(proto.STRING, number=1)
value = proto.Field(proto.STRING, number=2)
__all__ = tuple(sorted(__protobuf__.manifest))
| [((21, 15, 21, 88), 'proto.module', 'proto.module', (), '', False, 'import proto\n'), ((43, 11, 43, 46), 'proto.Field', 'proto.Field', (), '', False, 'import proto\n'), ((45, 12, 45, 47), 'proto.Field', 'proto.Field', (), '', False, 'import proto\n')] |
MrCoolSpan/openbor | tools/borplay/packlib.py | 846cfeb924906849c8a11e76c442e47286b707ea | # Copyright (c) 2009 Bryan Cain ("Plombo")
# Class and functions to read .PAK files.
import struct
from cStringIO import StringIO
class PackFileReader(object):
''' Represents a BOR packfile. '''
files = dict() # the index holding the location of each file
packfile = None # the file object
def __init__(self, fp):
'''fp is a file path (string) or file-like object (file, StringIO,
etc.) in binary read mode'''
if isinstance(fp, str):
self.packfile = open(fp, 'rb')
else:
self.packfile = fp
self.read_index()
# reads the packfile's index into self.files
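    # (layout, as the code below assumes: the last 4 bytes of the .PAK hold the index
    # offset; each index entry is <entry size, file offset, file size> followed by the
    # entry's file name)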
def read_index(self):
f = self.packfile
# read through file
tmp = True # placeholder that doesn't evaluate to false
while tmp: tmp = f.read(8192)
        # read index start position and seek there
f.seek(-4, 1)
endpos = f.tell()
f.seek(struct.unpack('<I', f.read(4))[0])
while f.tell() < endpos:
ssize, fpos, fsize = struct.unpack('<III', f.read(12))
name = f.read(ssize-12).strip('\x00').replace('\\', '/').lower()
self.files[name] = fpos, fsize
# reads a file with its full path.
def read_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method takes the full path starting with "data/" as a parameter.'''
key = filename.replace('\\', '/').lower().strip('\x00').strip()
if key not in self.files.keys(): return None
start, size = self.files[key]
self.packfile.seek(start)
f = StringIO()
bytesrem = size
while bytesrem >= 8192:
f.write(self.packfile.read(8192))
bytesrem -= 8192
if bytesrem: f.write(self.packfile.read(bytesrem))
f.seek(0)
return f
def find_file(self, filename):
'''Returns a file-like object for the file or None if the file isn't
contained in this packfile.
This method searches for the file by its filename.'''
filename = filename.lower().strip()
start, size = None, None
for key in self.files.keys():
if key.endswith(filename):
return self.read_file(key)
return None # file not found if it gets to this point
def list_music_files(self):
'''Lists the BOR files in the packfile.'''
borfiles = []
for key in self.files.keys():
if key.endswith('.bor'): borfiles.append(key)
borfiles.sort()
for key in borfiles: print key
def get_file(pak, borfile):
'''Prevents a need to directly use PackFileReader when you only want to get
one file, like in borplay and bor2wav. Returns a file-like object.'''
rdr = PackFileReader(pak)
if ('/' not in borfile) and ('\\' not in borfile): # only the filename is given; search for the file
return rdr.find_file(borfile)
else: # full path given
return rdr.read_file(borfile)
# For testing
if __name__ == '__main__':
rdr = PackFileReader('K:/BOR/OpenBOR/Paks/BOR.PAK')
#keys = rdr.files.keys(); keys.sort()
#print '\n'.join(keys)
#print rdr.read_file('data/chars/yamazaki/yamazaki.txt').read()
#print rdr.find_file('yamazaki.txt').read()
rdr.list_music_files()
| [] |
magus0219/icloud-photo-downloader | artascope/src/web/app.py | 6334530d971cf61089d031de99a38f204c201837 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[[email protected]] on 2020/3/23
from types import FunctionType
from flask import (
Flask,
redirect,
url_for,
)
import artascope.src.web.lib.filter as module_filter
from artascope.src.web.lib.content_processor import inject_version
def index():
return redirect(url_for("task.get_task_list"))
def create_app():
# create and configure the app
app = Flask(__name__)
app.jinja_env.filters.update(
{
key: val
for key, val in module_filter.__dict__.items()
if isinstance(val, FunctionType)
}
)
from . import user
from . import task
from . import scheduler
# register blueprint
app.register_blueprint(user.bp)
app.register_blueprint(task.bp)
app.register_blueprint(scheduler.bp)
# register index
app.add_url_rule("/", "index", index)
# register context processor
app.context_processor(inject_version)
return app
| [((21, 10, 21, 25), 'flask.Flask', 'Flask', ({(21, 16, 21, 24): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, redirect, url_for\n'), ((16, 20, 16, 49), 'flask.url_for', 'url_for', ({(16, 28, 16, 48): '"""task.get_task_list"""'}, {}), "('task.get_task_list')", False, 'from flask import Flask, redirect, url_for\n'), ((25, 28, 25, 58), 'artascope.src.web.lib.filter.__dict__.items', 'module_filter.__dict__.items', ({}, {}), '()', True, 'import artascope.src.web.lib.filter as module_filter\n')] |
KnowingNothing/akg-test | tests/common/test_op/scatter_nd.py | 114d8626b824b9a31af50a482afc07ab7121862b | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: scatter_nd"""
import akg.tvm
from akg.utils import validation_check as vc_util
def scatter_nd(indices, updates, shape):
"""
Scatters input tensor updates to a new tensor according to indices.
Args:
indices(akg.tvm.Tensor): Tensor of type int32.
updates(akg.tvm.Tensor): Tensor of type float16, float32, int32.
shape(list, tuple): Specifies the shape of output tensor.
Returns:
Scattered tensor with same type as input tensor updates and shape specified by parameter shape.
"""
# check shapes dtype
indices_shape = [x.value for x in indices.shape]
data_shape = [x.value for x in updates.shape]
vc_util.check_shape(indices_shape)
vc_util.check_shape(data_shape)
indices_dtype = indices.dtype
if not indices_dtype in "int32":
raise TypeError("indices_dtype only support int32 while dtype is %s" % indices_dtype)
dtype = updates.dtype
support_list = {"float16", "float32", "int32"}
if not (dtype in support_list):
raise TypeError("scatter_nd only support %s while dtype is %s" % (",".join(support_list), dtype))
n = indices.shape[0].value
def pick(i, j, *indexes):
return akg.tvm.expr.Select(j == indices[i][0],
akg.tvm.const(1, updates.dtype),
akg.tvm.const(0, updates.dtype)) * updates[(i,) + indexes]
reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
k = akg.tvm.reduce_axis((0, n))
res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
return res
| [((38, 4, 38, 38), 'akg.utils.validation_check.check_shape', 'vc_util.check_shape', ({(38, 24, 38, 37): 'indices_shape'}, {}), '(indices_shape)', True, 'from akg.utils import validation_check as vc_util\n'), ((39, 4, 39, 35), 'akg.utils.validation_check.check_shape', 'vc_util.check_shape', ({(39, 24, 39, 34): 'data_shape'}, {}), '(data_shape)', True, 'from akg.utils import validation_check as vc_util\n')] |
adamnew123456/spc | spc/backend_utils.py | 8809d1817f66cf8266f145aa0c2474b32dc1087a | """
Utility functions and classes shared by multiple backends
"""
from collections import namedtuple
import logging
from . import symbols
from . import types
LOGGER = logging.getLogger('spc.backend_utils')
# NameContexts encapsulate both the function stack (which holds values) and
# the symbol table context (which binds them)
NameContext = namedtuple('NameContext', ['symbol_ctx', 'func_stack'])
# While loops are identified by two labels - the start label, for re-running
# the condition, and the end label, for exiting when the condition is false
WhileLabels = namedtuple('WhileLabels', ['cond', 'exit'])
# If conditions are identified by two labels - the else label, for when
# the condition is false (to skip the then block) and the end label, for
# when the condition is true (to skip the else block)
IfLabels = namedtuple('IfLabels', ['else_body', 'end'])
# Switch conditionals are handled sort of like if conditionals:
#
# (switch |
# (case T1 B1) | jump-if-not T1, l1prime; ...; jump l4; l1prime:
# (case T2 B2) | jump-if-not T2, l2prime; ...; jump l4; l2prime:
# (else B3)) | ...
# | l4:
class SwitchLabels:
"""
Switch labels are similar to conditionals:
(switch |
(case T1 B1) | jump-if-not T1, case_lbl_1; ...; jump end; case_lbl_1:
(case T2 B2) | jump-if-not T2, case_lbl_2; ...; jump end; case_lbl_2:
(else B3) | ...; end_lbl:
Since each case is processed in order, only the current case end label and
the end switch label is available at any given time.
"""
def __init__(self, end_label):
self.end_label = end_label
self.case_end_label = None
class CoercionContext:
"""
This is used to wrap up all the information needed to coerce values from
one type to another.
"""
def __init__(self, backend, temp_context, code_templates):
self.backend = backend
self.temp_context = temp_context
self.templates = code_templates
def copy_with_context(self, new_context):
"""
Creates a copy of this object, but within a new temporary context.
"""
return CoercionContext(self.backend, new_context, self.templates)
def coerce(self, input_offset, input_type, output_type):
"""
Coerces a value, located on the stack, from the given input type to the
given output type. Returns the stack offset of the converted
variable and the output type.
Raises a TypeError if this is not possible.
"""
if input_type == output_type:
return input_offset, output_type
elif (input_type, output_type) == (types.Integer, types.Byte):
return self._coerce_int_to_byte(input_offset), output_type
elif (input_type, output_type) == (types.Byte, types.Integer):
return self._coerce_byte_to_int(input_offset), output_type
else:
raise TypeError('Cannot coerce {} -> {}'.format(input_type, output_type))
def _coerce_int_to_byte(self, input_offset):
"""
Coerces an integer to a byte, returning the stack offset of the
resulting byte.
"""
byte_size = self.backend._type_size(types.Byte)
byte_align = self.backend._type_alignment(types.Byte)
dest_offset = self.temp_context.add_temp(byte_size, byte_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing int@{} to byte@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_word(tmp_reg, input_offset)
self.templates.emit_int_to_byte(tmp_reg)
self.templates.emit_save_stack_byte(tmp_reg, dest_offset)
return dest_offset
def _coerce_byte_to_int(self, input_offset):
"""
Coerces a byte to an integer, returning the stack offset of the
resulting integer.
"""
int_size = self.backend._type_size(types.Integer)
int_align = self.backend._type_alignment(types.Integer)
dest_offset = self.temp_context.add_temp(int_size, int_align)
tmp_reg = self.templates.tmp_regs[0]
self.backend._write_comment('Coercing byte@{} to int@{}',
input_offset, dest_offset)
self.templates.emit_load_stack_byte(tmp_reg, input_offset)
self.templates.emit_byte_to_int(tmp_reg)
self.templates.emit_save_stack_word(tmp_reg, dest_offset)
return dest_offset
class FunctionStack:
"""
Tracks where variables are on the function's stack.
Note that this makes a number of assumptions about how things are stored:
- All arguments are stored on the stack, in reverse order. This goes
against the calling conventions for register rich architectures, like
MIPS, but there are enough corner cases (like copying structs by value)
that ignoring the calling convention is worthwhile for a non-optimizing
compiler like this.
- Locals and temporaries are stored on the stack, in order of creation.
"""
def __init__(self, backend):
self.backend = backend
self.local_offset = self._starting_locals_offset()
self.param_offset = self._starting_param_offset()
self.vars = {}
def _starting_locals_offset(self):
"""
Returns the starting offset of the local variables on the stack.
"""
raise NotImplementedError
def _starting_param_offset(self):
"""
Returns the starting offset of the parameter on the stack.
"""
raise NotImplementedError
def _expand_stack(self, size):
"""
Emits code to expand the stack frame by the given size.
"""
raise NotImplementedError
def _shrink_stack(self, size):
"""
Emits code to reduce the stack frame by the given size.
"""
raise NotImplementedError
def pad_param(self, space):
"""
Adds blank space before the next parameter.
"""
self.param_offset += space
def add_param(self, name, size, alignment):
"""
Adds a new parameter to the stack.
"""
self.param_offset = types.align_address(self.param_offset, alignment)
self.vars[name] = self.param_offset
self.param_offset += size
self.backend._write_comment('Binding param "{}" to offset {}', name, self.vars[name])
def add_local(self, name, size, alignment):
"""
Adds a local variable to the stack.
"""
self.local_offset = (
types.align_address(self.local_offset - size, alignment,
types.Alignment.Down))
self.vars[name] = self.local_offset
self.backend._write_comment('Binding local "{}" to offset {}', name, self.vars[name])
def get_temp_context(self, backend):
"""
Returns a context which can be used for putting temporary values on
the stack. When the context exits, the space used by the temporary
variables is cleaned up.
"""
root = self
class TemporaryContext:
def __init__(self, start_offset):
self.tmp_offset = start_offset
self.total_tmp_size = 0
def __enter__(self):
pass
def __exit__(self, *exc_info):
root._shrink_stack(self.total_tmp_size)
def add_temp(self, size, alignment):
"""
Makes space for a new temporary, returning the $fp offset at
which to write it.
"""
old_tmp_offset = self.tmp_offset
self.tmp_offset = (
types.align_address(self.tmp_offset - size, alignment,
types.Alignment.Down))
size_used = old_tmp_offset - self.tmp_offset
self.total_tmp_size += size_used
root._expand_stack(size_used)
return self.tmp_offset
def get_temp_context(self):
"""
Creates a temporary context, which starts at this temporary context.
"""
return TemporaryContext(self.tmp_offset)
return TemporaryContext(self.local_offset)
def expand_locals(self):
"""
Makes enough space for the local variables on the stack.
"""
self._expand_stack(self.locals_size())
def cleanup_locals(self):
"""
Cleans up the space used by the local variables on the stack.
"""
self._shrink_stack(self.locals_size())
def locals_size(self):
"""
Gets the size used by all the locals.
"""
return abs(self.local_offset) - abs(self._starting_locals_offset())
def __getitem__(self, name):
"""
Gets the offset to the variable on the stack, or a Register (if the
name was bound to one of the first four parameters)
"""
return self.vars[name]
class VerificationContext:
"""
Used to record all values and types defined all at once (i.e. inside the
same declaration block), so that they can be verified all at once.
"Verification" here means that their types are checked to be valid, which
means different things for different types.
"""
def __init__(self):
self.types = []
self.values = []
def add_value(self, name):
"""
Registers a new value to be verified.
"""
self.values.append(name)
def add_type(self, name):
"""
Registers a new type to be defined.
"""
        self.types.append(name)
def verify(self, backend):
"""
Verifies all the definitions against the backend.
"""
backend._check_valid_types(backend.ctx_types[name] for name in self.types)
backend._check_valid_types(backend.ctx_values[name] for name in self.values)
class ContextMixin:
"""
Manages the symbol table contexts for this backend (as well as its function stack
Depends upon the user of this mixin to inherit from BaseBackend in
addition to this one.
"""
def __init__(self):
self.parent_contexts = []
self.current_context = NameContext(symbols.Context(), None)
self.verify_context = VerificationContext()
def _register_file_ns(self, namespace):
"""
Replaces the current context, with one where the symbol context is
expanded to contain the file's namespace.
"""
file_context = self.current_context.symbol_ctx.register(namespace)
self.current_context = self.current_context._replace(symbol_ctx=file_context)
@property
def ctx_namespace(self):
"""
Gets the current namespace
"""
return self.current_context.symbol_ctx.search_path[0]
@property
def ctx_values(self):
"""
Returns the current context's value symbols.
"""
return self.current_context.symbol_ctx.values
@property
def ctx_types(self):
"""
Returns the current context's type symbols.
"""
return self.current_context.symbol_ctx.types
@property
def ctx_stack(self):
"""
Returns the current context's stack information.
"""
return self.current_context.func_stack
def _value_is_defined(self, name):
"""
Returns True if the given variable is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_values and
self.ctx_values.is_visible(name))
def _type_is_defined(self, name):
"""
Returns True if the given type is defined in the current scope, or
False otherwise.
This is for the static expression processor function, var-def?
"""
return (name in self.ctx_types and
self.ctx_types.is_visible(name))
def _make_func_stack(self):
raise NotImplementedError
def _push_context(self):
"""
Pushes a new binding context.
"""
old_context = self.current_context
self.parent_contexts.append(old_context)
self.current_context = NameContext(
self.current_context.symbol_ctx.enter(),
self._make_func_stack())
def _pop_context(self):
"""
Loads the previous binding context.
"""
self.current_context = self.parent_contexts.pop()
def _resolve_if_type_name(self, name):
"""
Resolves a type name into a concrete type.
"""
try:
return types.resolve_name(name, self.ctx_types)
except PermissionError as exn:
self.error(self.line, self.col,
'Cannot resolve hidden type "{}"', str(exn))
except RecursionError:
self.error(self.line, self.col,
'Type aliases too deep, when resolving "{}"', name)
except KeyError as exn:
self.error(self.line, self.col,
'Invalid type "{}"', str(exn))
def _verify_types(self):
"""
Verifies all the types across all this current context's symbols.
"""
self.verify_context.verify(self)
self.verify_context = VerificationContext()
class ThirtyTwoMixin:
"""
Defines some information about type sizes and alignment which 32-bit
platforms have in common.
Depends upon the user of this mixin to inherit from ContextMixin.
"""
def _type_alignment(self, type_obj):
"""
Returns alignment of the given type (1 for byte, 4 for word, etc.)
"""
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
return self._type_alignment(type_obj.type)
elif isinstance(type_obj, types.Struct):
# The alignment only concerns the first element of the struct -
# the struct's internal alignment doesn't come into play
#
# Also, an OrderdDict's fields are not iterable, for whatever reason
struct_types = list(type_obj.fields.values())
return self._type_alignment(struct_types[0])
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
def _type_size(self, type_obj, depth=0):
"""
Returns the size of a type object in bytes.
"""
MAX_DEPTH = 100
if depth >= MAX_DEPTH:
self.error(self.line, self.col,
"Type nested too deeply - potential self-referential type")
type_obj = self._resolve_if_type_name(type_obj)
if type_obj is types.Integer:
return 4
elif type_obj is types.Byte:
return 1
elif isinstance(type_obj, (types.PointerTo, types.FunctionPointer)):
return 4
elif isinstance(type_obj, types.ArrayOf):
# To avoid wasting space on the last element, this pads all the
# elements but the last
base_size = self._type_size(type_obj.type, depth + 1)
return self._array_offset(type_obj, type_obj.count - 1) + base_size
elif isinstance(type_obj, types.Struct):
last_field = list(type_obj.fields)[-1]
last_field_type = type_obj.fields[last_field]
last_field_offset = self._field_offset(type_obj, last_field)
return last_field_offset + self._type_size(last_field_type, depth + 1)
else:
raise TypeError('Not a compiler type: {}'.format(type_obj))
class comment_after:
"""
Wraps a method - after the method executes, something is written to
the log.
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __call__(self, func):
def wrapper(parent, *args, **kwargs):
x = func(parent, *args, **kwargs)
parent._write_comment(self.fmt, *self.args, **self.kwargs)
return x
return wrapper
| [((10, 9, 10, 47), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 46): '"""spc.backend_utils"""'}, {}), "('spc.backend_utils')", False, 'import logging\n'), ((14, 14, 14, 69), 'collections.namedtuple', 'namedtuple', ({(14, 25, 14, 38): '"""NameContext"""', (14, 40, 14, 68): "['symbol_ctx', 'func_stack']"}, {}), "('NameContext', ['symbol_ctx', 'func_stack'])", False, 'from collections import namedtuple\n'), ((18, 14, 18, 57), 'collections.namedtuple', 'namedtuple', ({(18, 25, 18, 38): '"""WhileLabels"""', (18, 40, 18, 56): "['cond', 'exit']"}, {}), "('WhileLabels', ['cond', 'exit'])", False, 'from collections import namedtuple\n'), ((23, 11, 23, 55), 'collections.namedtuple', 'namedtuple', ({(23, 22, 23, 32): '"""IfLabels"""', (23, 34, 23, 54): "['else_body', 'end']"}, {}), "('IfLabels', ['else_body', 'end'])", False, 'from collections import namedtuple\n')] |
neuralaudio/hear-eval-kit | heareval/__init__.py | f92119592954544dfb417f8e9aea21eadb4a65d0 | __version__ = "2021.0.6"
| [] |
tranlyvu/recommender | recommender_engine/similarity_measure/__init__.py | 4985c355d54ee22ba48f4891077fd7e12bd21b47 | """
recommender_engine
-----
recommender_engine is a recommendation application using either item-based or user-based approaches
:copyright: (c) 2016 - 2019 by Tran Ly Vu. All Rights Reserved.
:license: Apache License 2.0
"""
from .cosine import cosine
from .euclidean_distance import euclidean_distance
from .pearson_correlation import pearson_correlation
name="similarity_measure"
__all__ = ["cosine", "euclidean_distance", "pearson_correlation"]
__author__ = "Tran Ly Vu ([email protected])"
__copyright__ = "Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved."
__license__ = "Apache License 2.0"
__credits__ = ["Tran Ly Vu"]
__maintainer__ = "Tran Ly Vu"
__email__ = "[email protected]"
__status__ = "Beta"
| [] |
KoyanagiHitoshi/AtCoder | code/abc057_a_02.py | 731892543769b5df15254e1f32b756190378d292 | a,b=map(int,input().split())
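# ABC057 A: given the current hour a and an offset of b hours, print the
# 24-hour clock time b hours from now, i.e. (a + b) mod 24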
print((a+b)%24) | [] |
kopsh/python_cookbook | 8/8_9.py | 298c092cd20404a0755e2170776c44a04e8648ad | class CheckType:
r"""
    8.9 Creating a new class or instance attribute
    Use a descriptor to implement parameter type checking
>>> @ParamAssert(a=int, b=list)
... class A:
... def __init__(self, a, b):
... self.a = a
... self.b = b
>>> a = A(1, [])
"""
def __init__(self, name, expected_type):
self.name = name
self.expected_type = expected_type
def __get__(self, instance, owner):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.expected_type):
raise TypeError("{} cannot be assigned by {!r}, it`s type is {!r}".format(self.name, value,
self.expected_type))
instance.__dict__[self.name] = value
class ParamAssert:
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, expected_type in self.kwargs.items():
setattr(cls, name, CheckType(name, expected_type))
return cls
class Integer:
def __init__(self, name):
self.name = name
def __get__(self, instance, cls):
if instance is None:
return self
else:
return instance.__dict__.get(self.name, None)
def __set__(self, instance, value):
if not isinstance(value, int):
raise TypeError("{} cannot be assigned by {!r}".format(self.name, value))
instance.__dict__[self.name] = value
class Point:
"""
>>> p = Point(0, 0)
>>> print(p.x)
0
>>> p.y = "1"
Traceback (most recent call last):
...
TypeError: y cannot be assigned by '1'
"""
x = Integer('x')
y = Integer('y')
def __init__(self, x, y):
self.x = x
self.y = y
if __name__ == '__main__':
import doctest
doctest.testmod() | [((75, 4, 75, 21), 'doctest.testmod', 'doctest.testmod', ({}, {}), '()', False, 'import doctest\n')] |
ediphy-dwild/gpytorch | test/examples/test_simple_gp_regression.py | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable
# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(constant_bounds=(-1, 1))
self.covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return GaussianRandomVariable(mean_x, covar_x)
class TestSimpleGPRegression(unittest.TestCase):
def test_posterior_latent_gp_and_likelihood_without_optimization(self):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
# Update bounds to accommodate extreme parameters
gp_model.covar_module.set_bounds(log_lengthscale=(-10, 10))
likelihood.set_bounds(log_noise=(-10, 10))
# Update parameters
gp_model.covar_module.initialize(log_lengthscale=-10)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=-10)
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(
torch.norm(function_predictions.mean().data - train_y.data),
1e-3,
)
self.assertLess(torch.norm(function_predictions.var().data), 1e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(Variable(torch.Tensor([1.1])))
self.assertLess(
torch.norm(test_function_predictions.mean().data - 0),
1e-4,
)
self.assertLess(torch.norm(test_function_predictions.var().data - 1), 1e-4)
def test_posterior_latent_gp_and_likelihood_with_optimization(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(
torch.abs(test_y - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
with gpytorch.fast_pred_var():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(
list(gp_model.parameters()) + list(likelihood.parameters()),
lr=0.1,
)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.log_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.log_noise.exp()
var_diff = (test_function_predictions.var() - noise).abs()
self.assertLess(torch.max(var_diff.data / noise.data), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
# We're manually going to set the hyperparameters to
# something they shouldn't be
likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3)).cuda()
gp_model = ExactGPModel(
train_x.data.cuda(),
train_y.data.cuda(),
likelihood
).cuda()
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.covar_module.initialize(log_lengthscale=1)
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(log_noise=1)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x.cuda())
loss = -mll(output, train_y.cuda())
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x.cuda()))
mean_abs_error = torch.mean(
torch.abs(test_y.cuda() - test_function_predictions.mean())
)
self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)
if __name__ == '__main__':
unittest.main()
| [((14, 19, 14, 43), 'torch.linspace', 'torch.linspace', ({(14, 34, 14, 35): '0', (14, 37, 14, 38): '1', (14, 40, 14, 42): '11'}, {}), '(0, 1, 11)', False, 'import torch\n'), ((15, 19, 15, 58), 'torch.sin', 'torch.sin', ({(15, 29, 15, 57): 'train_x.data * (2 * math.pi)'}, {}), '(train_x.data * (2 * math.pi))', False, 'import torch\n'), ((17, 18, 17, 42), 'torch.linspace', 'torch.linspace', ({(17, 33, 17, 34): '0', (17, 36, 17, 37): '1', (17, 39, 17, 41): '51'}, {}), '(0, 1, 51)', False, 'import torch\n'), ((18, 18, 18, 56), 'torch.sin', 'torch.sin', ({(18, 28, 18, 55): 'test_x.data * (2 * math.pi)'}, {}), '(test_x.data * (2 * math.pi))', False, 'import torch\n'), ((187, 4, 187, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((24, 27, 24, 64), 'gpytorch.means.ConstantMean', 'ConstantMean', (), '', False, 'from gpytorch.means import ConstantMean\n'), ((25, 28, 25, 69), 'gpytorch.kernels.RBFKernel', 'RBFKernel', (), '', False, 'from gpytorch.kernels import RBFKernel\n'), ((30, 15, 30, 54), 'gpytorch.random_variables.GaussianRandomVariable', 'GaussianRandomVariable', ({(30, 38, 30, 44): 'mean_x', (30, 46, 30, 53): 'covar_x'}, {}), '(mean_x, covar_x)', False, 'from gpytorch.random_variables import GaussianRandomVariable\n'), ((36, 21, 36, 65), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', (), '', False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((71, 21, 71, 65), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', (), '', False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((73, 14, 73, 71), 'gpytorch.ExactMarginalLogLikelihood', 'gpytorch.ExactMarginalLogLikelihood', ({(73, 50, 73, 60): 'likelihood', (73, 62, 73, 70): 'gp_model'}, {}), '(likelihood, gp_model)', False, 'import gpytorch\n'), ((148, 11, 148, 36), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((105, 13, 105, 37), 'gpytorch.fast_pred_var', 'gpytorch.fast_pred_var', ({}, {}), '()', False, 'import gpytorch\n'), ((108, 25, 108, 69), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', (), '', False, 'from gpytorch.likelihoods import GaussianLikelihood\n'), ((110, 18, 110, 80), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', ({(110, 59, 110, 69): 'likelihood', (110, 71, 110, 79): 'gp_model'}, {}), '(likelihood, gp_model)', False, 'import gpytorch\n'), ((157, 18, 157, 80), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', ({(157, 59, 157, 69): 'likelihood', (157, 71, 157, 79): 'gp_model'}, {}), '(likelihood, gp_model)', False, 'import gpytorch\n'), ((61, 54, 61, 73), 'torch.Tensor', 'torch.Tensor', ({(61, 67, 61, 72): '[1.1]'}, {}), '([1.1])', False, 'import torch\n'), ((145, 28, 145, 65), 'torch.max', 'torch.max', ({(145, 38, 145, 64): '(var_diff.data / noise.data)'}, {}), '(var_diff.data / noise.data)', False, 'import torch\n'), ((151, 25, 151, 69), 'gpytorch.likelihoods.GaussianLikelihood', 'GaussianLikelihood', (), '', False, 'from gpytorch.likelihoods import GaussianLikelihood\n')] |
snavinch/cybersource-rest-samples-python | samples/RiskManagement/Verification/customer-match-denied-parties-list.py | adb7a6b4b55dff6ac833295192d6677b53003c16 | from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# Recursively delete None values from the input request JSON body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
def customer_match_denied_parties_list():
clientReferenceInformationCode = "verification example"
clientReferenceInformationComments = "Export-basic"
clientReferenceInformationPartnerDeveloperId = "7891234"
clientReferenceInformationPartnerSolutionId = "89012345"
clientReferenceInformationPartner = Riskv1decisionsClientReferenceInformationPartner(
developer_id = clientReferenceInformationPartnerDeveloperId,
solution_id = clientReferenceInformationPartnerSolutionId
)
clientReferenceInformation = Riskv1decisionsClientReferenceInformation(
code = clientReferenceInformationCode,
comments = clientReferenceInformationComments,
partner = clientReferenceInformationPartner.__dict__
)
orderInformationBillToAddress1 = "901 Metro Centre Blvd"
orderInformationBillToAdministrativeArea = "CA"
orderInformationBillToCountry = "US"
orderInformationBillToLocality = "Foster City"
orderInformationBillToPostalCode = "94404"
orderInformationBillToCompanyName = "A & C International Trade, Inc"
orderInformationBillToCompany = Riskv1exportcomplianceinquiriesOrderInformationBillToCompany(
name = orderInformationBillToCompanyName
)
orderInformationBillToFirstName = "ANDREE"
orderInformationBillToLastName = "AGNESE"
orderInformationBillToEmail = "[email protected]"
orderInformationBillTo = Riskv1exportcomplianceinquiriesOrderInformationBillTo(
address1 = orderInformationBillToAddress1,
administrative_area = orderInformationBillToAdministrativeArea,
country = orderInformationBillToCountry,
locality = orderInformationBillToLocality,
postal_code = orderInformationBillToPostalCode,
company = orderInformationBillToCompany.__dict__,
first_name = orderInformationBillToFirstName,
last_name = orderInformationBillToLastName,
email = orderInformationBillToEmail
)
orderInformationShipToCountry = "IN"
orderInformationShipToFirstName = "DumbelDore"
orderInformationShipToLastName = "Albus"
orderInformationShipTo = Riskv1exportcomplianceinquiriesOrderInformationShipTo(
country = orderInformationShipToCountry,
first_name = orderInformationShipToFirstName,
last_name = orderInformationShipToLastName
)
orderInformationLineItems = []
orderInformationLineItems1 = Riskv1exportcomplianceinquiriesOrderInformationLineItems(
unit_price = "120.50",
quantity = 3,
product_sku = "123456",
product_name = "Qwe",
product_code = "physical_software"
)
orderInformationLineItems.append(orderInformationLineItems1.__dict__)
orderInformation = Riskv1exportcomplianceinquiriesOrderInformation(
bill_to = orderInformationBillTo.__dict__,
ship_to = orderInformationShipTo.__dict__,
line_items = orderInformationLineItems
)
requestObj = ValidateExportComplianceRequest(
client_reference_information = clientReferenceInformation.__dict__,
order_information = orderInformation.__dict__
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = VerificationApi(client_config)
return_data, status, body = api_instance.validate_export_compliance(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
return return_data
except Exception as e:
print("\nException when calling VerificationApi->validate_export_compliance: %s\n" % e)
if __name__ == "__main__":
customer_match_denied_parties_list()
| [((6, 27, 6, 38), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((93, 17, 93, 39), 'json.dumps', 'json.dumps', ({(93, 28, 93, 38): 'requestObj'}, {}), '(requestObj)', False, 'import json\n'), ((7, 16, 7, 60), 'importlib.machinery.SourceFileLoader', 'SourceFileLoader', ({(7, 33, 7, 46): '"""module.name"""', (7, 48, 7, 59): 'config_file'}, {}), "('module.name', config_file)", False, 'from importlib.machinery import SourceFileLoader\n')] |
Lucchese-Anthony/MonteCarloSimulation | SimulatePi.py | 45a625b88dab6658b43b472d49d82aaeb1e847bd | import numpy as np
import random
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
angle = np.linspace( 0 , 2 * np.pi , 150)
radius = 1
x = radius * np.cos(angle)
y = radius * np.sin(angle)
#prints the circle
style.use('fivethirtyeight')
fig = plt.figure()
axes = fig.add_subplot(1,1,1)
axes.plot( x, y, color="red")
inside = []
outside = []
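# every sampled point falls in the 2x2 square; the fraction landing inside the unit
# circle approaches pi / 4, so pi can be estimated as
# 4 * len(inside) / (len(inside) + len(outside))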
def inCircle(x, y):
    return math.sqrt( (x**2) + (y**2) ) <= 1
def animate(i):
x = random.uniform(1,-1)
y = random.uniform(1,-1)
if (inCircle(x, y)):
point = axes.scatter(x, y, color="blue")
inside.append(point)
else:
point = axes.scatter(x, y, color="red")
outside.append(point)
try:
ratio = len(inside) / len(outside)
print(ratio)
except ZeroDivisionError:
print(0)
ani = animation.FuncAnimation(fig, animate, interval=5)
plt.show()
| [] |
aarvanitii/adminWebsite | run.py | cf9a07c287571ebbc9954326806b578f6d19a11b | """
This is where the web application starts running
"""
from app.index import create_app
app = create_app()
if __name__ == "__main__":
app.secret_key = 'mysecret'
app.run(port=8080, host="0.0.0.0", debug=True) | [((5, 6, 5, 18), 'app.index.create_app', 'create_app', ({}, {}), '()', False, 'from app.index import create_app\n')] |
jorgelmp/sistop-2022-1 | tareas/3/GarciaFigueroaAlberto-GarciaEdgar/Proceso.py | 5c3b7e5215247533446aa006affe6cc64a48d989 | class Proceso:
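    # A process in a CPU-scheduling simulation; the Spanish field names translate to:
    # tiempo_de_llegada = arrival time, t = requested CPU time, inicio/fin = start/finish,
    # tRestantes = remaining time (T, E and P are presumably the turnaround, wait and
    # penalty metrics filled in later by the scheduler).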
def __init__(self,tiempo_de_llegada,t,id):
self.t=t
self.tiempo_de_llegada=tiempo_de_llegada
self.id=id
self.inicio=0
self.fin=0
self.T=0
self.E=0
self.P=0
self.tRestantes = t
| [] |
Sourav692/FAANG-Interview-Preparation | Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)
class Solution(object):
def findInteger(self, k, digit1, digit2):
"""
:type k: int
:type digit1: int
:type digit2: int
:rtype: int
"""
MAX_NUM_OF_DIGITS = 10
INT_MAX = 2**31-1
if digit1 < digit2:
digit1, digit2 = digit2, digit1
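        # enumerate candidates in increasing order: shorter lengths first, and within a
        # length the bitmask picks digit2 (the smaller digit) for 0-bits and digit1 for
        # 1-bits, so the first multiple of k found is the smallest valid answer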
total = 2
for l in xrange(1, MAX_NUM_OF_DIGITS+1):
for mask in xrange(total):
curr, bit = 0, total>>1
while bit:
curr = curr*10 + (digit1 if mask&bit else digit2)
bit >>= 1
if k < curr <= INT_MAX and curr%k == 0:
return curr
total <<= 1
return -1
| [] |
compoundpartners/js-people | aldryn_people/tests/test_plugins.py | a3744c3880f6626e677034a693f337c927baf886 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import reverse
except ImportError:
# Django 2.0
from django.urls import reverse
from django.utils.translation import force_text
from cms import api
from cms.utils.i18n import force_language
from aldryn_people import DEFAULT_APP_NAMESPACE
from ..models import Person, Group
from ..cms_plugins import PeoplePlugin
from . import DefaultApphookMixin, BasePeopleTest
class TestPersonPlugins(DefaultApphookMixin, BasePeopleTest):
def test_add_people_list_plugin_api(self):
"""
We add a person to the People Plugin and look her up
"""
name = 'Donald'
Person.objects.create(name=name)
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
self.assertEqual(force_text(plugin), force_text(plugin.pk))
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, name)
# This fails because of Sane Add Plugin (I suspect). This will be refactored
# and re-enabled in a future commit.
# def test_add_people_list_plugin_client(self):
# """
# We log into the PeoplePlugin
# """
# self.client.login(
# username=self.su_username, password=self.su_password)
#
# plugin_data = {
# 'plugin_type': 'PeoplePlugin',
# 'plugin_language': self.language,
# 'placeholder_id': self.placeholder.pk,
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# self.assertTrue(CMSPlugin.objects.exists())
def test_hide_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Add a plugin where ungrouped people are not shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = False
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertNotContains(response, alice.name)
def test_show_ungrouped(self):
"""
"""
the_bradys = Group.objects.create(name="The Bradys")
alice = Person.objects.create(name="Alice")
bobby = Person.objects.create(name="Bobby")
cindy = Person.objects.create(name="Cindy")
# Alice is the housekeeper, not a real Brady.
bobby.groups.add(the_bradys)
cindy.groups.add(the_bradys)
# Now, add a new plugin where ungrouped people are shown
plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
plugin.people = Person.objects.all()
plugin.group_by_group = True
plugin.show_ungrouped = True
plugin.save()
self.page.publish(self.language)
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, bobby.name)
self.assertContains(response, cindy.name)
self.assertContains(response, alice.name)
class TestPeopleListPluginNoApphook(BasePeopleTest):
def setUp(self):
super(TestPeopleListPluginNoApphook, self).setUp()
# we are testing only en
self.person1.set_current_language('en')
self.namespace = DEFAULT_APP_NAMESPACE
def create_plugin(self, plugin_params=None):
if plugin_params is None:
plugin_params = {}
with force_language('en'):
plugin = api.add_plugin(
self.placeholder, PeoplePlugin, 'en', **plugin_params)
self.page.publish('en')
return plugin
def test_plugin_with_no_apphook_doesnot_breaks_page(self):
self.create_plugin()
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertNotContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_no_apphook_shows_error_message(self):
self.create_plugin()
url = self.page.get_absolute_url()
self.client.login(username=self.su_username,
password=self.su_password)
response = self.client.get(url, user=self.superuser)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.person1.name)
from ..cms_plugins import NAMESPACE_ERROR
self.assertContains(response, NAMESPACE_ERROR[:20])
def test_plugin_with_vcard_enabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_with_vcard_disabled_no_apphook(self):
self.create_plugin(plugin_params={'show_vcard': False})
url = self.page.get_absolute_url()
response = self.client.get(url)
self.assertContains(response, self.person1.name)
def test_plugin_show_links_are_shown_if_enabled_and_apphook_page(self):
with force_language('en'):
app_page = self.create_apphook_page()
list_plugin = api.add_plugin(
placeholder=self.placeholder,
plugin_type=PeoplePlugin,
language='en',
)
list_plugin.show_links = True
list_plugin.save()
self.page.publish('en')
url = self.page.get_absolute_url()
person_url = self.person1.get_absolute_url()
# ensure that url is not the link to the home page and not app page
app_page_len = len(app_page.get_absolute_url())
self.assertGreater(len(person_url), app_page_len)
response = self.client.get(url, follow=True)
self.assertContains(response, person_url)
# ensure that url is not shown if not enabled for plugin.
list_plugin.show_links = False
list_plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertNotContains(response, person_url)
def test_plugin_with_vcard_enabled_with_apphook(self):
vcard_kwargs = {
'slug': self.person1.slug
}
with force_language('en'):
self.create_apphook_page()
person_vcard_url = reverse(
'{0}:download_vcard'.format(self.namespace),
kwargs=vcard_kwargs)
plugin = self.create_plugin(plugin_params={'show_vcard': True})
url = self.page.get_absolute_url()
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertContains(response, person_vcard_url)
# test that vcard download link is not shown if disabled
plugin.show_vcard = False
plugin.save()
self.page.publish('en')
response = self.client.get(url, follow=True)
self.assertContains(response, self.person1.name)
self.assertNotContains(response, person_vcard_url)
| [((29, 17, 29, 78), 'cms.api.add_plugin', 'api.add_plugin', ({(29, 32, 29, 48): 'self.placeholder', (29, 50, 29, 62): 'PeoplePlugin', (29, 64, 29, 77): 'self.language'}, {}), '(self.placeholder, PeoplePlugin, self.language)', False, 'from cms import api\n'), ((68, 17, 68, 78), 'cms.api.add_plugin', 'api.add_plugin', ({(68, 32, 68, 48): 'self.placeholder', (68, 50, 68, 62): 'PeoplePlugin', (68, 64, 68, 77): 'self.language'}, {}), '(self.placeholder, PeoplePlugin, self.language)', False, 'from cms import api\n'), ((93, 17, 93, 78), 'cms.api.add_plugin', 'api.add_plugin', ({(93, 32, 93, 48): 'self.placeholder', (93, 50, 93, 62): 'PeoplePlugin', (93, 64, 93, 77): 'self.language'}, {}), '(self.placeholder, PeoplePlugin, self.language)', False, 'from cms import api\n'), ((31, 25, 31, 43), 'django.utils.translation.force_text', 'force_text', ({(31, 36, 31, 42): 'plugin'}, {}), '(plugin)', False, 'from django.utils.translation import force_text\n'), ((31, 45, 31, 66), 'django.utils.translation.force_text', 'force_text', ({(31, 56, 31, 65): 'plugin.pk'}, {}), '(plugin.pk)', False, 'from django.utils.translation import force_text\n'), ((118, 13, 118, 33), 'cms.utils.i18n.force_language', 'force_language', ({(118, 28, 118, 32): '"""en"""'}, {}), "('en')", False, 'from cms.utils.i18n import force_language\n'), ((119, 21, 120, 70), 'cms.api.add_plugin', 'api.add_plugin', ({(120, 16, 120, 32): 'self.placeholder', (120, 34, 120, 46): 'PeoplePlugin', (120, 48, 120, 52): '"""en"""'}, {}), "(self.placeholder, PeoplePlugin, 'en', **plugin_params)", False, 'from cms import api\n'), ((157, 13, 157, 33), 'cms.utils.i18n.force_language', 'force_language', ({(157, 28, 157, 32): '"""en"""'}, {}), "('en')", False, 'from cms.utils.i18n import force_language\n'), ((159, 26, 163, 13), 'cms.api.add_plugin', 'api.add_plugin', (), '', False, 'from cms import api\n'), ((185, 13, 185, 33), 'cms.utils.i18n.force_language', 'force_language', ({(185, 28, 185, 32): '"""en"""'}, {}), "('en')", False, 'from cms.utils.i18n import force_language\n')] |
xcnick/TurboTransformers | turbo_transformers/python/tests/__init__.py | 48b6ba09af2219616c6b97cc5c09222408e080c2 | # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
| [] |
audreymychan/djsmile | generate_joke.py | 8dc5d6337f1b32db8bf3dfbf13315ec25049ebb5 | # This script contains the get_joke() function to generate a new dad joke
import requests
def get_joke():
"""Return new joke string from icanhazdadjoke.com."""
url = "https://icanhazdadjoke.com/"
response = requests.get(url, headers={'Accept': 'application/json'})
raw_joke = response.json()
joke = raw_joke['joke']
return joke
| [((9, 15, 9, 72), 'requests.get', 'requests.get', (), '', False, 'import requests\n')] |
elihschiff/Rubber-Duck-Python | bot/tests/test_triggers/__init__.py | 24dea3b64a8a46368cd8dd995c800375f355b55e | from .test_commands import all_commands
all_triggers = all_commands
from .test_quack import TestQuack
all_triggers.append(TestQuack)
| [] |
Scout24/crassus | src/main/scripts/crassus_deployer_lambda.py | 8e3d5ff073181cabaf0e764c3d8be18fc7d27992 | from __future__ import print_function
from crassus import Crassus
from crassus.output_converter import OutputConverter
def handler(event, context):
crassus = Crassus(event, context)
crassus.deploy()
def cfn_output_converter(event, context):
"""
Convert an AWS CloudFormation output message to our defined
ResultMessage format.
"""
output_converter = OutputConverter(event, context)
output_converter.convert()
| [((7, 14, 7, 37), 'crassus.Crassus', 'Crassus', ({(7, 22, 7, 27): 'event', (7, 29, 7, 36): 'context'}, {}), '(event, context)', False, 'from crassus import Crassus\n'), ((16, 23, 16, 54), 'crassus.output_converter.OutputConverter', 'OutputConverter', ({(16, 39, 16, 44): 'event', (16, 46, 16, 53): 'context'}, {}), '(event, context)', False, 'from crassus.output_converter import OutputConverter\n')] |
tchamabe1979/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py | 462983e4feec7808e1fd447d02901502588a8879 | import sys
success = False
in_ironpython = "IronPython" in sys.version
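# Pick a clipboard backend at import time: IronPython uses its own wrapper, CPython falls back to the win32 API.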
if in_ironpython:
try:
from ironpython_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
pass
else:
try:
from win32_clipboard import GetClipboardText, SetClipboardText
success = True
except ImportError:
raise
def send_data(lists):
SetClipboardText(make_tab(lists))
def set_clipboard_text(toclipboard):
SetClipboardText(str(toclipboard))
def make_tab(lists):
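    # Accepts lists or numpy arrays; each row becomes a tab-separated string, rows are joined by newlines.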
if hasattr(lists, "tolist"):
lists = lists.tolist()
ut = []
for rad in lists:
if type(rad) in [list, tuple]:
ut.append("\t".join(["%s" % x for x in rad]))
else:
ut.append("%s" % rad)
return "\n".join(ut)
def make_list_of_list(txt):
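    # Split clipboard text into rows (on "\r\n") and cells (on tabs), converting cells to int/float/complex
    # where possible; the returned flag is True if any cell remained a string.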
def make_num(x):
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
try:
return complex(x)
except ValueError:
return x
return x
ut = []
flag = False
for rad in [x for x in txt.split("\r\n") if x != ""]:
raden = [make_num(x) for x in rad.split("\t")]
if str in map(type, raden):
flag = True
ut.append(raden)
return ut, flag
def get_clipboard_text_and_convert(paste_list=False):
"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric"""
txt = GetClipboardText()
if txt:
if paste_list and "\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = "array(%s)" % repr(array)
txt = "".join([c for c in txt if c not in " \t\r\n"])
return txt
| [((69, 10, 69, 28), 'win32_clipboard.GetClipboardText', 'GetClipboardText', ({}, {}), '()', False, 'from win32_clipboard import GetClipboardText, SetClipboardText\n')] |
YujieLu10/tslam | mjrl/utils/train_agent.py | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt
try:
import exptools
from colorsys import hsv_to_rgb
import pyvista as pv
except ImportError:
exptools = None
def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
"""Loads the latest policy.
Returns the next step number to begin with.
"""
assert os.path.isdir(policy_dir), str(policy_dir)
assert os.path.isdir(logs_dir), str(logs_dir)
log_csv_path = os.path.join(logs_dir, 'log.csv')
if not os.path.exists(log_csv_path):
return 0 # fresh start
print("Reading: {}".format(log_csv_path))
agent.logger.read_log(log_csv_path)
last_step = agent.logger.max_len - 1
if last_step <= 0:
return 0 # fresh start
# find latest policy/baseline
i = last_step
while i >= 0:
policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))
if not os.path.isfile(policy_path):
i = i -1
continue
else:
print("Loaded last saved iteration: {}".format(i))
with open(policy_path, 'rb') as fp:
agent.policy = pickle.load(fp)
with open(baseline_path, 'rb') as fp:
agent.baseline = pickle.load(fp)
# additional
# global_status_path = os.path.join(policy_dir, 'global_status.pickle')
# with open(global_status_path, 'rb') as fp:
# agent.load_global_status( pickle.load(fp) )
agent.logger.shrink_to(i + 1)
assert agent.logger.max_len == i + 1
return agent.logger.max_len
# cannot find any saved policy
raise RuntimeError("Log file exists, but cannot find any saved policy.")
def save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, iternum, is_best_policy):
uniform_gt_data = np.load("/home/jianrenw/prox/tslam/assets/uniform_gt/uniform_{}_o3d.npz".format(obj_name))['pcd']
data_scale = uniform_gt_data * obj_scale
data_rotate = data_scale.copy()
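    # Rotate the scaled ground-truth point cloud about the x, y and z axes by obj_orientation.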
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
x_theta = obj_orientation[0]
data_rotate[:, 0] = x
data_rotate[:, 1] = y*math.cos(x_theta) - z*math.sin(x_theta)
data_rotate[:, 2] = y*math.sin(x_theta) + z*math.cos(x_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
y_theta = obj_orientation[1]
data_rotate[:, 0] = x * math.cos(y_theta) + z * math.sin(y_theta)
data_rotate[:, 1] = y
data_rotate[:, 2] = z * math.cos(y_theta) - x * math.sin(y_theta)
x = data_rotate[:, 0].copy()
y = data_rotate[:, 1].copy()
z = data_rotate[:, 2].copy()
z_theta = obj_orientation[2]
data_rotate[:, 0] = x * math.cos(z_theta) - y * math.sin(z_theta)
data_rotate[:, 1] = x * math.sin(z_theta) + y * math.cos(z_theta)
data_rotate[:, 2] = z
data_trans = data_rotate.copy()
data_trans[:, 0] += obj_relative_position[0]
data_trans[:, 1] += obj_relative_position[1]
data_trans[:, 2] += obj_relative_position[2]
uniform_gt_data = data_trans.copy()
data = pc_frame
resolution = 0.01
sep_x = math.ceil(0.3 / resolution)
sep_y = math.ceil(0.3 / resolution)
sep_z = math.ceil(0.3 / resolution)
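    # Voxelize both point clouds on a 1 cm grid (30 x 30 x 30 cells spanning a 0.3 m cube).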
x, y, z = np.indices((sep_x, sep_y, sep_z))
cube1 = (x<0) & (y <1) & (z<1)
gtcube = (x<0) & (y <1) & (z<1)
voxels = cube1
gt_voxels = gtcube
# draw gt
gt_map_list = []
for idx,val in enumerate(uniform_gt_data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in gt_map_list:
gt_map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
gt_voxels += cube
# draw cuboids in the top left and bottom right corners, and a link between them
map_list = []
for idx,val in enumerate(data):
idx_x = math.floor((val[0] + 0.15) / resolution)
idx_y = math.floor((val[1] + 0.15) / resolution)
idx_z = math.floor((val[2]) / resolution)
# if idx_z > 6:
# continue
name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
if name not in map_list and name in gt_map_list:
map_list.append(name)
cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
# combine the objects into a single boolean array
voxels += cube
# gt_obj4:668
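    # Occupancy: fraction of ground-truth voxels that the explored point cloud has covered.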
occupancy = len(map_list) / len(gt_map_list)
# print(len(map_list) / sep_x / sep_y / sep_z )
is_best_reconstruct = True
files = os.listdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/'.format(obj_name, reset_mode_conf, reward_conf))
for file in files:
if "overlap" in file and "png" in file:
file_str = str(file)
previous_occup = file_str[(file_str.index("-")+1):file_str.index(".png")]
if occupancy < float(previous_occup):
is_best_reconstruct = False
# obj_name = "obj{}".format(obj_name)
# set the colors of each object
vis_voxel = gt_voxels | voxels
colors = np.empty(vis_voxel.shape, dtype=object)
colors[gt_voxels] = 'white'
colors[voxels] = 'cyan'
# and plot everything
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
# plt.savefig('uniform_gtbox_{}.png'.format(step))
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_overlap-{}.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
plt.savefig('voxel/iter-{}-{}-overlap-{}.png'.format(iternum, obj_name, occupancy))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/gt.png'.format(obj_name, reset_mode_conf, reward_conf))
plt.savefig('voxel/iter-{}-{}-gt.png'.format(iternum, obj_name))
plt.close()
ax = plt.figure().add_subplot(projection='3d')
ax.set_zlim(1,30)
ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
if is_best_policy or is_best_reconstruct:
plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_exp.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct))
plt.savefig('voxel/iter-{}-{}-exp.png'.format(iternum, obj_name))
plt.close()
return is_best_reconstruct, occupancy
def train_agent(job_name, agent,
seed = 0,
niter = 101,
gamma = 0.995,
gae_lambda = None,
num_cpu = 16,
sample_mode = 'trajectories',
horizon= int(150),
num_traj = 50,
num_samples = 50000, # has precedence, used with sample_mode = 'samples'
save_freq = 10,
evaluation_rollouts = None,
plot_keys = ['stoc_pol_mean'],
env_kwargs= dict(),
visualize_kwargs= dict(),
sample_paths_kwargs= dict(),
):
print("num_cpu{}".format(num_cpu))
np.random.seed(seed)
if os.path.isdir(job_name) == False:
os.mkdir(job_name)
previous_dir = os.getcwd()
obj_name = env_kwargs["obj_name"]
reset_mode_conf = env_kwargs["reset_mode"]
reward_conf = "cf{}knn{}voxel{}".format(env_kwargs["chamfer_r_factor"], env_kwargs["knn_r_factor"], env_kwargs["new_voxel_r_factor"])
os.chdir(job_name) # important! we are now in the directory to save data
if os.path.isdir('iterations') == False: os.mkdir('iterations')
if os.path.isdir('2dpointcloud') == False: os.mkdir('2dpointcloud')
if os.path.isdir('pointcloudnpz') == False: os.mkdir('pointcloudnpz')
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False: os.makedirs('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
if os.path.isdir('voxel') == False: os.mkdir('voxel')
if os.path.isdir('logs') == False and agent.save_logs == True: os.mkdir('logs')
best_policy = copy.deepcopy(agent.policy)
best_perf = -1e8
train_curve = best_perf*np.ones(niter)
mean_pol_perf = 0.0
e = GymEnv(agent.env.env_id, env_kwargs)
# Load from any existing checkpoint, policy, statistics, etc.
# Why no checkpointing.. :(
i_start = _load_latest_policy_and_logs(agent,
policy_dir='iterations',
logs_dir='logs')
if i_start:
print("Resuming from an existing job folder ...")
for i in range(i_start, niter):
print("......................................................................................")
print("ITERATION : %i " % i)
is_best_policy = False
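        # Track the best policy seen so far, judged by the previous iteration's sampled return.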
if train_curve[i-1] > best_perf:
if exptools: exptools.logging.logger.log_text("update best_policy")
best_policy = copy.deepcopy(agent.policy)
best_perf = train_curve[i-1]
is_best_policy = True
N = num_traj if sample_mode == 'trajectories' else num_samples
stats = agent.train_step(
N=N,
sample_mode=sample_mode,
horizon= horizon,
gamma=gamma,
gae_lambda=gae_lambda,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
sample_paths_kwargs= sample_paths_kwargs,
)
train_curve[i] = stats[0]
if evaluation_rollouts is not None and evaluation_rollouts > 0:
print("Performing evaluation rollouts ........")
eval_paths = sample_paths(
num_traj=evaluation_rollouts,
env=e.env_id,
policy=agent.policy,
eval_mode=True,
base_seed=seed,
num_cpu=num_cpu,
env_kwargs= env_kwargs,
**sample_paths_kwargs)
mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
if agent.save_logs:
agent.logger.log_kv('eval_score', mean_pol_perf)
if exptools: exptools.logging.logger.log_scalar('eval_score', mean_pol_perf, i)
if exptools:
env_infos = [path["env_infos"] for path in eval_paths] # a list of dict
rewards = dict()
total_points = list()
if env_infos:
# get decomposed reward statistics
keys = [k for k in env_infos[0].keys() if "_p" in k[-2:] or "_r" in k[-2:] or "occupancy" in k]
for k in keys:
rewards[k] = list()
for env_info in env_infos:
rewards[k].append(env_info[k])
for env_info in env_infos:
total_points.append(len(env_info["pointcloud"]))
for k, v in rewards.items():
exptools.logging.logger.log_scalar_batch(k, v, i)
exptools.logging.logger.log_scalar_batch("total_num_points", total_points, i)
print(">>> finish evaluation rollouts")
if (i % save_freq == 0 and i > 0):
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
obj_orientation = env_kwargs["obj_orientation"]
obj_relative_position = env_kwargs["obj_relative_position"]
obj_scale = env_kwargs["obj_scale"]
policy_file = 'policy_%i.pickle' % i
baseline_file = 'baseline_%i.pickle' % i
pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
pickle.dump(agent.global_status, open('iterations/global_status.pickle', 'wb'))
# save videos and pointcloud and reconstruted mesh
if exptools:
video, env_infos = e.visualize_policy_offscreen(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
video_explore, env_infos_explore = e.visualize_policy_explore(
policy= agent.policy,
**visualize_kwargs,
) # (T, C, H, W)
pc_frame = np.array(env_infos[-1]["pointcloud"] if len(env_infos[-1]["pointcloud"]) > 0 else np.empty((0, 3)))
# 3d voxel visualization
is_best_reconstruct, occupancy = save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, i, is_best_policy)
if is_best_policy or is_best_reconstruct:
pickle.dump(best_policy, open('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}/bp{}_br{}_best_policy.pickle'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct), 'wb'))
if is_best_policy or is_best_reconstruct:
np.savez_compressed("pointcloudnpz/alpha_pointcloud_"+str(i)+".npz",pcd=pc_frame)
np.savez_compressed("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_pointcloud_overlap-{}.npz".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy), pcd=pc_frame)
# else:
# np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame)
# pc_frames.append(pc_frame)
ax = plt.axes()
ax.scatter(pc_frame[:, 0], pc_frame[:, 1], cmap='viridis', linewidth=0.5)
if is_best_policy or is_best_reconstruct:
plt.savefig("2dpointcloud/alpha_{}.png".format('2dpointcloud' + str(i)))
plt.savefig("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_2dpointcloud_overlap-{}.png".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
# else:
# plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i)))
plt.close()
# =======================================================
# if obj_name in ["airplane", "apple", "glass", "cup"]:
exptools.logging.logger.record_image("rendered", video[-1], i)
exptools.logging.logger.record_gif("rendered", video, i)
# exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i)
# exptools.logging.logger.record_gif("rendered_explore", video_explore, i)
# print results to console
if i == 0:
result_file = open('results.txt', 'w')
print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
result_file.close()
result_file = open('results.txt', 'a')
result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
result_file.close()
if agent.save_logs:
print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
agent.logger.get_current_log().items()))
print(tabulate(print_data))
if exptools:
exptools.logging.logger.log_scalar("Iter", i, i)
exptools.logging.logger.log_scalar("SamplingPol", train_curve[i], i)
exptools.logging.logger.log_scalar("EvaluationPol", mean_pol_perf, i)
exptools.logging.logger.log_scalar("BestSampled", best_perf, i)
exptools.logging.logger.dump_data()
# final save
pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
if agent.save_logs:
agent.logger.save_log('logs/')
make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
os.chdir(previous_dir)
| [((2, 0, 2, 33), 'logging.disable', 'logging.disable', ({(2, 16, 2, 32): 'logging.CRITICAL'}, {}), '(logging.CRITICAL)', False, 'import logging\n'), ((28, 11, 28, 36), 'os.path.isdir', 'os.path.isdir', ({(28, 25, 28, 35): 'policy_dir'}, {}), '(policy_dir)', False, 'import os\n'), ((29, 11, 29, 34), 'os.path.isdir', 'os.path.isdir', ({(29, 25, 29, 33): 'logs_dir'}, {}), '(logs_dir)', False, 'import os\n'), ((31, 19, 31, 52), 'os.path.join', 'os.path.join', ({(31, 32, 31, 40): 'logs_dir', (31, 42, 31, 51): '"""log.csv"""'}, {}), "(logs_dir, 'log.csv')", False, 'import os\n'), ((108, 12, 108, 39), 'math.ceil', 'math.ceil', ({(108, 22, 108, 38): '0.3 / resolution'}, {}), '(0.3 / resolution)', False, 'import math\n'), ((109, 12, 109, 39), 'math.ceil', 'math.ceil', ({(109, 22, 109, 38): '0.3 / resolution'}, {}), '(0.3 / resolution)', False, 'import math\n'), ((110, 12, 110, 39), 'math.ceil', 'math.ceil', ({(110, 22, 110, 38): '0.3 / resolution'}, {}), '(0.3 / resolution)', False, 'import math\n'), ((111, 14, 111, 47), 'numpy.indices', 'np.indices', ({(111, 25, 111, 46): '(sep_x, sep_y, sep_z)'}, {}), '((sep_x, sep_y, sep_z))', True, 'import numpy as np\n'), ((163, 13, 163, 52), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((175, 4, 175, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((183, 4, 183, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((191, 4, 191, 15), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((212, 4, 212, 24), 'numpy.random.seed', 'np.random.seed', ({(212, 19, 212, 23): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((215, 19, 215, 30), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((219, 4, 219, 22), 'os.chdir', 'os.chdir', ({(219, 13, 219, 21): 'job_name'}, {}), '(job_name)', False, 'import os\n'), ((227, 18, 227, 45), 'copy.deepcopy', 'copy.deepcopy', ({(227, 32, 227, 44): 'agent.policy'}, {}), '(agent.policy)', False, 'import copy\n'), ((231, 8, 231, 44), 'mjrl.utils.gym_env.GymEnv', 'GymEnv', ({(231, 15, 231, 31): 'agent.env.env_id', (231, 33, 231, 43): 'env_kwargs'}, {}), '(agent.env.env_id, env_kwargs)', False, 'from mjrl.utils.gym_env import GymEnv\n'), ((375, 4, 375, 26), 'os.chdir', 'os.chdir', ({(375, 13, 375, 25): 'previous_dir'}, {}), '(previous_dir)', False, 'import os\n'), ((32, 11, 32, 39), 'os.path.exists', 'os.path.exists', ({(32, 26, 32, 38): 'log_csv_path'}, {}), '(log_csv_path)', False, 'import os\n'), ((121, 16, 121, 56), 'math.floor', 'math.floor', ({(121, 27, 121, 55): '(val[0] + 0.15) / resolution'}, {}), '((val[0] + 0.15) / resolution)', False, 'import math\n'), ((122, 16, 122, 56), 'math.floor', 'math.floor', ({(122, 27, 122, 55): '(val[1] + 0.15) / resolution'}, {}), '((val[1] + 0.15) / resolution)', False, 'import math\n'), ((123, 16, 123, 49), 'math.floor', 'math.floor', ({(123, 27, 123, 48): 'val[2] / resolution'}, {}), '(val[2] / resolution)', False, 'import math\n'), ((136, 16, 136, 56), 'math.floor', 'math.floor', ({(136, 27, 136, 55): '(val[0] + 0.15) / resolution'}, {}), '((val[0] + 0.15) / resolution)', False, 'import math\n'), ((137, 16, 137, 56), 'math.floor', 'math.floor', ({(137, 27, 137, 55): '(val[1] + 0.15) / resolution'}, {}), '((val[1] + 0.15) / resolution)', False, 'import math\n'), ((138, 16, 138, 49), 'math.floor', 'math.floor', ({(138, 27, 138, 48): 'val[2] / resolution'}, {}), '(val[2] / resolution)', False, 
'import math\n'), ((213, 7, 213, 30), 'os.path.isdir', 'os.path.isdir', ({(213, 21, 213, 29): 'job_name'}, {}), '(job_name)', False, 'import os\n'), ((214, 8, 214, 26), 'os.mkdir', 'os.mkdir', ({(214, 17, 214, 25): 'job_name'}, {}), '(job_name)', False, 'import os\n'), ((220, 7, 220, 34), 'os.path.isdir', 'os.path.isdir', ({(220, 21, 220, 33): '"""iterations"""'}, {}), "('iterations')", False, 'import os\n'), ((220, 45, 220, 67), 'os.mkdir', 'os.mkdir', ({(220, 54, 220, 66): '"""iterations"""'}, {}), "('iterations')", False, 'import os\n'), ((221, 7, 221, 36), 'os.path.isdir', 'os.path.isdir', ({(221, 21, 221, 35): '"""2dpointcloud"""'}, {}), "('2dpointcloud')", False, 'import os\n'), ((221, 47, 221, 71), 'os.mkdir', 'os.mkdir', ({(221, 56, 221, 70): '"""2dpointcloud"""'}, {}), "('2dpointcloud')", False, 'import os\n'), ((222, 7, 222, 37), 'os.path.isdir', 'os.path.isdir', ({(222, 21, 222, 36): '"""pointcloudnpz"""'}, {}), "('pointcloudnpz')", False, 'import os\n'), ((222, 48, 222, 73), 'os.mkdir', 'os.mkdir', ({(222, 57, 222, 72): '"""pointcloudnpz"""'}, {}), "('pointcloudnpz')", False, 'import os\n'), ((225, 7, 225, 29), 'os.path.isdir', 'os.path.isdir', ({(225, 21, 225, 28): '"""voxel"""'}, {}), "('voxel')", False, 'import os\n'), ((225, 40, 225, 57), 'os.mkdir', 'os.mkdir', ({(225, 49, 225, 56): '"""voxel"""'}, {}), "('voxel')", False, 'import os\n'), ((226, 67, 226, 83), 'os.mkdir', 'os.mkdir', ({(226, 76, 226, 82): '"""logs"""'}, {}), "('logs')", False, 'import os\n'), ((229, 28, 229, 42), 'numpy.ones', 'np.ones', ({(229, 36, 229, 41): 'niter'}, {}), '(niter)', True, 'import numpy as np\n'), ((374, 8, 374, 80), 'mjrl.utils.make_train_plots.make_train_plots', 'make_train_plots', (), '', False, 'from mjrl.utils.make_train_plots import make_train_plots\n'), ((48, 15, 48, 42), 'os.path.isfile', 'os.path.isfile', ({(48, 30, 48, 41): 'policy_path'}, {}), '(policy_path)', False, 'import os\n'), ((55, 27, 55, 42), 'pickle.load', 'pickle.load', ({(55, 39, 55, 41): 'fp'}, {}), '(fp)', False, 'import pickle\n'), ((57, 29, 57, 44), 'pickle.load', 'pickle.load', ({(57, 41, 57, 43): 'fp'}, {}), '(fp)', False, 'import pickle\n'), ((81, 26, 81, 43), 'math.cos', 'math.cos', ({(81, 35, 81, 42): 'x_theta'}, {}), '(x_theta)', False, 'import math\n'), ((81, 48, 81, 65), 'math.sin', 'math.sin', ({(81, 57, 81, 64): 'x_theta'}, {}), '(x_theta)', False, 'import math\n'), ((82, 26, 82, 43), 'math.sin', 'math.sin', ({(82, 35, 82, 42): 'x_theta'}, {}), '(x_theta)', False, 'import math\n'), ((82, 48, 82, 65), 'math.cos', 'math.cos', ({(82, 57, 82, 64): 'x_theta'}, {}), '(x_theta)', False, 'import math\n'), ((88, 28, 88, 45), 'math.cos', 'math.cos', ({(88, 37, 88, 44): 'y_theta'}, {}), '(y_theta)', False, 'import math\n'), ((88, 52, 88, 69), 'math.sin', 'math.sin', ({(88, 61, 88, 68): 'y_theta'}, {}), '(y_theta)', False, 'import math\n'), ((90, 28, 90, 45), 'math.cos', 'math.cos', ({(90, 37, 90, 44): 'y_theta'}, {}), '(y_theta)', False, 'import math\n'), ((90, 52, 90, 69), 'math.sin', 'math.sin', ({(90, 61, 90, 68): 'y_theta'}, {}), '(y_theta)', False, 'import math\n'), ((96, 28, 96, 45), 'math.cos', 'math.cos', ({(96, 37, 96, 44): 'z_theta'}, {}), '(z_theta)', False, 'import math\n'), ((96, 52, 96, 69), 'math.sin', 'math.sin', ({(96, 61, 96, 68): 'z_theta'}, {}), '(z_theta)', False, 'import math\n'), ((97, 28, 97, 45), 'math.sin', 'math.sin', ({(97, 37, 97, 44): 'z_theta'}, {}), '(z_theta)', False, 'import math\n'), ((97, 52, 97, 69), 'math.cos', 'math.cos', ({(97, 61, 97, 68): 'z_theta'}, {}), '(z_theta)', 
False, 'import math\n'), ((167, 9, 167, 21), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((177, 9, 177, 21), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((185, 9, 185, 21), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((226, 7, 226, 28), 'os.path.isdir', 'os.path.isdir', ({(226, 21, 226, 27): '"""logs"""'}, {}), "('logs')", False, 'import os\n'), ((247, 26, 247, 53), 'copy.deepcopy', 'copy.deepcopy', ({(247, 40, 247, 52): 'agent.policy'}, {}), '(agent.policy)', False, 'import copy\n'), ((266, 25, 274, 38), 'mjrl.samplers.core.sample_paths', 'sample_paths', (), '', False, 'from mjrl.samplers.core import sample_paths\n'), ((364, 12, 364, 60), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', ({(364, 47, 364, 53): '"""Iter"""', (364, 55, 364, 56): 'i', (364, 58, 364, 59): 'i'}, {}), "('Iter', i, i)", False, 'import exptools\n'), ((365, 12, 365, 80), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', ({(365, 47, 365, 60): '"""SamplingPol"""', (365, 62, 365, 76): 'train_curve[i]', (365, 78, 365, 79): 'i'}, {}), "('SamplingPol', train_curve[i], i)", False, 'import exptools\n'), ((366, 12, 366, 81), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', ({(366, 47, 366, 62): '"""EvaluationPol"""', (366, 64, 366, 77): 'mean_pol_perf', (366, 79, 366, 80): 'i'}, {}), "('EvaluationPol', mean_pol_perf, i)", False, 'import exptools\n'), ((367, 12, 367, 75), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', ({(367, 47, 367, 60): '"""BestSampled"""', (367, 62, 367, 71): 'best_perf', (367, 73, 367, 74): 'i'}, {}), "('BestSampled', best_perf, i)", False, 'import exptools\n'), ((368, 12, 368, 47), 'exptools.logging.logger.dump_data', 'exptools.logging.logger.dump_data', ({}, {}), '()', False, 'import exptools\n'), ((246, 25, 246, 79), 'exptools.logging.logger.log_text', 'exptools.logging.logger.log_text', ({(246, 58, 246, 78): '"""update best_policy"""'}, {}), "('update best_policy')", False, 'import exptools\n'), ((294, 16, 294, 93), 'exptools.logging.logger.log_scalar_batch', 'exptools.logging.logger.log_scalar_batch', ({(294, 57, 294, 75): '"""total_num_points"""', (294, 77, 294, 89): 'total_points', (294, 91, 294, 92): 'i'}, {}), "('total_num_points', total_points, i)", False, 'import exptools\n'), ((300, 16, 300, 88), 'mjrl.utils.make_train_plots.make_train_plots', 'make_train_plots', (), '', False, 'from mjrl.utils.make_train_plots import make_train_plots\n'), ((335, 21, 335, 31), 'matplotlib.pyplot.axes', 'plt.axes', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((342, 16, 342, 27), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((345, 16, 345, 78), 'exptools.logging.logger.record_image', 'exptools.logging.logger.record_image', ({(345, 53, 345, 63): '"""rendered"""', (345, 65, 345, 74): 'video[-1]', (345, 76, 345, 77): 'i'}, {}), "('rendered', video[-1], i)", False, 'import exptools\n'), ((346, 16, 346, 72), 'exptools.logging.logger.record_gif', 'exptools.logging.logger.record_gif', ({(346, 51, 346, 61): '"""rendered"""', (346, 63, 346, 68): 'video', (346, 70, 346, 71): 'i'}, {}), "('rendered', video, i)", False, 'import exptools\n'), ((362, 18, 362, 38), 'tabulate.tabulate', 'tabulate', ({(362, 27, 362, 37): 'print_data'}, {}), '(print_data)', False, 'from tabulate 
import tabulate\n'), ((275, 37, 275, 60), 'numpy.sum', 'np.sum', ({(275, 44, 275, 59): "path['rewards']"}, {}), "(path['rewards'])", True, 'import numpy as np\n'), ((278, 29, 278, 95), 'exptools.logging.logger.log_scalar', 'exptools.logging.logger.log_scalar', ({(278, 64, 278, 76): '"""eval_score"""', (278, 78, 278, 91): 'mean_pol_perf', (278, 93, 278, 94): 'i'}, {}), "('eval_score', mean_pol_perf, i)", False, 'import exptools\n'), ((293, 20, 293, 69), 'exptools.logging.logger.log_scalar_batch', 'exptools.logging.logger.log_scalar_batch', ({(293, 61, 293, 62): 'k', (293, 64, 293, 65): 'v', (293, 67, 293, 68): 'i'}, {}), '(k, v, i)', False, 'import exptools\n'), ((322, 109, 322, 125), 'numpy.empty', 'np.empty', ({(322, 118, 322, 124): '(0, 3)'}, {}), '((0, 3))', True, 'import numpy as np\n'), ((360, 49, 360, 65), 'numpy.asarray', 'np.asarray', ({(360, 60, 360, 64): 'v[1]'}, {}), '(v[1])', True, 'import numpy as np\n')] |
NicsTr/pretix | src/tests/plugins/banktransfer/test_refund_export.py | e6d2380d9ed1836cc64a688b2be20d00a8500eab | import json
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User
from pretix.plugins.banktransfer.models import RefundExport
from pretix.plugins.banktransfer.views import (
_row_key_func, _unite_transaction_rows,
)
@pytest.fixture
def env():
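    # Minimal fixture: an organizer, event, admin user/team, one paid order and a pending bank-transfer refund.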
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer,pretix.plugins.paypal'
)
user = User.objects.create_user('[email protected]', 'dummy')
t = Team.objects.create(organizer=event.organizer, can_view_orders=True, can_change_orders=True)
t.members.add(user)
t.limit_events.add(event)
order = Order.objects.create(
code='1Z3AS', event=event, email='admin@localhost',
status=Order.STATUS_PAID,
datetime=now(), expires=now() + timedelta(days=10),
total=23
)
refund = OrderRefund.objects.create(
order=order,
amount=Decimal("23"),
provider='banktransfer',
state=OrderRefund.REFUND_STATE_CREATED,
info=json.dumps({
'payer': "Abc Def",
'iban': "DE27520521540534534466",
'bic': "HELADEF1MEG",
})
)
return event, user, refund
url_prefixes = [
"/control/event/dummy/dummy/",
"/control/organizer/dummy/"
]
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds_as_sepa_xml(client, env, url_prefix):
client.login(email='[email protected]', password='dummy')
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert b"SEPA" in r.content
r = client.get(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/', {
"account_holder": "Fission Festival",
"iban": "DE71720690050653667120",
"bic": "GENODEF1AIL",
})
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds(client, env, url_prefix):
client.login(email='[email protected]', password='dummy')
r = client.get(f'{url_prefix}banktransfer/refunds/')
assert r.status_code == 200
r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
assert r.status_code == 200
refund = RefundExport.objects.last()
assert refund is not None
assert b"Download CSV" in r.content
r = client.get(f'{url_prefix}banktransfer/export/{refund.id}/')
assert r.status_code == 200
assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)
def test_unite_transaction_rows():
rows = sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("42.23"),
},
{
'payer': "First Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-1",
'amount': Decimal("6.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == rows
rows = sorted(rows + [
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("7.77"),
},
{
'payer': "Another Last",
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': "PARTY-R-2",
'amount': Decimal("13.50"),
}
], key=_row_key_func)
assert _unite_transaction_rows(rows) == sorted([
{
'payer': "Abc Def",
'iban': 'DE12345678901234567890',
'bic': 'HARKE9000',
'id': "ROLLA-R-1",
'amount': Decimal("50.00"),
},
{
'payer': 'Another Last, First Last',
'iban': 'DE111111111111111111111',
'bic': 'ikswez2020',
'id': 'PARTY-R-1, PARTY-R-2',
'amount': Decimal('20.00'),
}], key=_row_key_func)
| [((53, 1, 53, 52), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(53, 25, 53, 37): '"""url_prefix"""', (53, 39, 53, 51): 'url_prefixes'}, {}), "('url_prefix', url_prefixes)", False, 'import pytest\n'), ((69, 1, 69, 52), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(69, 25, 69, 37): '"""url_prefix"""', (69, 39, 69, 51): 'url_prefixes'}, {}), "('url_prefix', url_prefixes)", False, 'import pytest\n'), ((17, 8, 17, 60), 'pretix.base.models.Organizer.objects.create', 'Organizer.objects.create', (), '', False, 'from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User\n'), ((22, 11, 22, 65), 'pretix.base.models.User.objects.create_user', 'User.objects.create_user', ({(22, 36, 22, 55): '"""[email protected]"""', (22, 57, 22, 64): '"""dummy"""'}, {}), "('[email protected]', 'dummy')", False, 'from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User\n'), ((23, 8, 23, 100), 'pretix.base.models.Team.objects.create', 'Team.objects.create', (), '', False, 'from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User\n'), ((76, 13, 76, 40), 'pretix.plugins.banktransfer.models.RefundExport.objects.last', 'RefundExport.objects.last', ({}, {}), '()', False, 'from pretix.plugins.banktransfer.models import RefundExport\n'), ((102, 11, 102, 40), 'pretix.plugins.banktransfer.views._unite_transaction_rows', '_unite_transaction_rows', ({(102, 35, 102, 39): 'rows'}, {}), '(rows)', False, 'from pretix.plugins.banktransfer.views import _row_key_func, _unite_transaction_rows\n'), ((121, 11, 121, 40), 'pretix.plugins.banktransfer.views._unite_transaction_rows', '_unite_transaction_rows', ({(121, 35, 121, 39): 'rows'}, {}), '(rows)', False, 'from pretix.plugins.banktransfer.views import _row_key_func, _unite_transaction_rows\n'), ((20, 18, 20, 23), 'django.utils.timezone.now', 'now', ({}, {}), '()', False, 'from django.utils.timezone import now\n'), ((29, 17, 29, 22), 'django.utils.timezone.now', 'now', ({}, {}), '()', False, 'from django.utils.timezone import now\n'), ((34, 15, 34, 28), 'decimal.Decimal', 'Decimal', ({(34, 23, 34, 27): '"""23"""'}, {}), "('23')", False, 'from decimal import Decimal\n'), ((37, 13, 41, 10), 'json.dumps', 'json.dumps', ({(37, 24, 41, 9): "{'payer': 'Abc Def', 'iban': 'DE27520521540534534466', 'bic': 'HELADEF1MEG'}"}, {}), "({'payer': 'Abc Def', 'iban': 'DE27520521540534534466', 'bic':\n 'HELADEF1MEG'})", False, 'import json\n'), ((29, 32, 29, 37), 'django.utils.timezone.now', 'now', ({}, {}), '()', False, 'from django.utils.timezone import now\n'), ((29, 40, 29, 58), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((91, 22, 91, 38), 'decimal.Decimal', 'Decimal', ({(91, 30, 91, 37): '"""42.23"""'}, {}), "('42.23')", False, 'from decimal import Decimal\n'), ((98, 22, 98, 37), 'decimal.Decimal', 'Decimal', ({(98, 30, 98, 36): '"""6.50"""'}, {}), "('6.50')", False, 'from decimal import Decimal\n'), ((58, 59, 58, 86), 'pretix.plugins.banktransfer.models.RefundExport.objects.last', 'RefundExport.objects.last', ({}, {}), '()', False, 'from pretix.plugins.banktransfer.models import RefundExport\n'), ((60, 60, 60, 87), 'pretix.plugins.banktransfer.models.RefundExport.objects.last', 'RefundExport.objects.last', ({}, {}), '()', False, 'from pretix.plugins.banktransfer.models import RefundExport\n'), ((110, 22, 110, 37), 'decimal.Decimal', 'Decimal', ({(110, 30, 110, 36): '"""7.77"""'}, {}), "('7.77')", False, 'from decimal import Decimal\n'), ((117, 22, 117, 38), 
'decimal.Decimal', 'Decimal', ({(117, 30, 117, 37): '"""13.50"""'}, {}), "('13.50')", False, 'from decimal import Decimal\n'), ((127, 22, 127, 38), 'decimal.Decimal', 'Decimal', ({(127, 30, 127, 37): '"""50.00"""'}, {}), "('50.00')", False, 'from decimal import Decimal\n'), ((134, 22, 134, 38), 'decimal.Decimal', 'Decimal', ({(134, 30, 134, 37): '"""20.00"""'}, {}), "('20.00')", False, 'from decimal import Decimal\n')] |
ICT4H/dcs-web | datawinners/alldata/urls.py | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.conf.urls.defaults import patterns, url
from datawinners.alldata.views import get_entity_list_by_type
from datawinners.alldata.views import smart_phone_instruction
from datawinners.alldata.views import index, reports
from datawinners.alldata.views import failed_submissions
urlpatterns = patterns('',
url(r'^alldata/$', index, name="alldata_index"),
url(r'^project/$', index),
(r'^questionnaire/entities/(?P<entity_type>.+?)/$', get_entity_list_by_type),
(r'^questionnaire/reports/$', reports),
(r'^alldata/reports/$', reports),
(r'^allfailedsubmissions/$', failed_submissions),
url(r'^smartphoneinstruction$', smart_phone_instruction, name="smart_phone_instruction"),
url(r'^smartphoneinstruction/(?P<project_id>.+?)/$', smart_phone_instruction, name="smart_phone_instruction"),
)
| [((9, 4, 9, 51), 'django.conf.urls.defaults.url', 'url', (), '', False, 'from django.conf.urls.defaults import patterns, url\n'), ((10, 4, 10, 29), 'django.conf.urls.defaults.url', 'url', ({(10, 8, 10, 21): '"""^project/$"""', (10, 23, 10, 28): 'index'}, {}), "('^project/$', index)", False, 'from django.conf.urls.defaults import patterns, url\n'), ((15, 4, 15, 92), 'django.conf.urls.defaults.url', 'url', (), '', False, 'from django.conf.urls.defaults import patterns, url\n'), ((16, 4, 16, 113), 'django.conf.urls.defaults.url', 'url', (), '', False, 'from django.conf.urls.defaults import patterns, url\n')] |
NewLife1324/NewLifeUtils-Dev | NewLifeUtils/LoggerModule.py | d955ad801da879d2888506853b0d0141c15dfafc | from NewLifeUtils.ColorModule import ACC, MCC
from NewLifeUtils.UtilsModule import hex_to_rgb
from NewLifeUtils.FileModule import DataStorage, LogFile
from NewLifeUtils.StringUtilModule import remove_csi
from datetime import datetime
import sys
class Formatter(dict):
    def __init__(self, *args, date_format="%d-%m-%Y", time_format="%H:%M:%S", **kwargs):
        self.date_format = date_format
        self.time_format = time_format
dict.__init__(self, *args, **kwargs)
def __missing__(self, key):
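        # Missing keys resolve to the current time or date, ANSI style codes for "#..." keys, or are echoed back unchanged.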
if key == "time":
return datetime.now().strftime(self.time_format)
elif key == "date":
return datetime.now().strftime(self.date_format)
elif key.startswith("#"):
if key == "#reset":
return ACC.RESET
elif key == "#under":
return ACC.UNDERLINE
elif key == "#nounder":
return ACC.NO_UNDERLINE
elif key == "#reverse":
return ACC.REVERSE
elif key == "#noreverse":
return ACC.NO_REVERSE
else:
return ACC.customrgb(*hex_to_rgb(key))
else:
return "{" + key + "}"
def create_logger(
pattern="[{time}] {tag}: {message}",
tag_length=7,
default_tag="Log",
reader=False,
reader_bg="#24416b",
reader_fg="#a0dbf2",
file_log=False,
logfile=None,
time_format = "%d-%m-%Y",
data_format = "%H:%M:%S",
):
def log(message, tag=""):
if reader:
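            # Reader mode: show a colored prompt, read a line of input, then erase the prompt line.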
if not any([message.endswith(i) for i in tuple(":> ")]):
title = message + ": "
else:
title = message
message = message.rstrip(" ")
message = message.rstrip(":")
message = message.rstrip(">")
sys.stdout.write(
f"{ACC.bcustomrgb(*hex_to_rgb(reader_bg))}{ACC.customrgb(*hex_to_rgb(reader_fg))}{title}{MCC.ERASE_NXT_LINE}"
)
readed = input()
sys.stdout.write(ACC.RESET + MCC.up() + MCC.ERASE_ALL_LINE)
else:
readed = None
tag = ("{:<" + str(tag_length) + "}").format(tag if tag else default_tag)
log_record = pattern.format_map(
Formatter(tag=tag, message=message, input=readed,
time_format = time_format,
data_format = data_format)
)
sys.stdout.write(ACC.RESET + log_record + ACC.RESET + "\n")
if file_log:
logfile.write(remove_csi(log_record) + "\n")
return readed
return log
def cstm(pattern, **kwargs):
sys.stdout.write(
ACC.RESET + pattern.format_map(Formatter(**kwargs)) + ACC.RESET + "\n"
)
def smart_format(pattern, **kwargs):
return pattern.format_map(Formatter(**kwargs))
def init_from_cfg():
default_config = {
"log_pattern": "{#81f059}[{time}] {#6bd130}{tag}{#fff}: {#1ed476}{message}",
"wrn_pattern": "{#cfa529}[{time}] {#d7e356}{tag}{#fff}: {#b9c726}{message}",
"err_pattern": "{#cf4729}[{time}] {#d93b18}{tag}{#fff}: {#cf2727}{message}",
"tip_pattern": "{#9c1fd1}[{time}] {#471dc4}{tag}{#fff}: {#219ddb}{message}",
"rea_pattern": "{#2141a3}[{time}] {#5a51db}{tag}{#fff}: {#2459d6}{message} {#fff}: {#24d0d6}{input}",
"log_tag": "Log",
"wrn_tag": "Warn",
"err_tag": "Error",
"tip_tag": "Tip",
"rea_tag": "Reader",
"date_format": "%d-%m-%Y",
"time_format": "%H:%M:%S",
"tag_length": 7,
"file_log": True,
"logtime": "%d-%m-%Y-%H",
"logname": "log-{time}",
}
config = DataStorage("config.yml", "logger", default_config)
if config["file_log"]:
now = datetime.now()
logname = config["logname"]
logtime = config["logtime"]
logfile = LogFile(f"{logname.format(time=now.strftime(logtime))}.log", "logs")
else:
logfile = None
log = create_logger(pattern=config["log_pattern"], default_tag=config["log_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
wrn = create_logger(pattern=config["wrn_pattern"], default_tag=config["wrn_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
err = create_logger(pattern=config["err_pattern"], default_tag=config["err_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
tip = create_logger(pattern=config["tip_pattern"], default_tag=config["tip_tag"], file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
rea = create_logger(
pattern=config["rea_pattern"], default_tag=config["rea_tag"], reader=True
)
return log, wrn, err, tip, rea
log, wrn, err, tip, rea = init_from_cfg() | [((105, 13, 105, 64), 'NewLifeUtils.FileModule.DataStorage', 'DataStorage', ({(105, 25, 105, 37): '"""config.yml"""', (105, 39, 105, 47): '"""logger"""', (105, 49, 105, 63): 'default_config'}, {}), "('config.yml', 'logger', default_config)", False, 'from NewLifeUtils.FileModule import DataStorage, LogFile\n'), ((71, 8, 71, 67), 'sys.stdout.write', 'sys.stdout.write', ({(71, 25, 71, 66): "(ACC.RESET + log_record + ACC.RESET + '\\n')"}, {}), "(ACC.RESET + log_record + ACC.RESET + '\\n')", False, 'import sys\n'), ((107, 14, 107, 28), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((17, 19, 17, 33), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((73, 26, 73, 48), 'NewLifeUtils.StringUtilModule.remove_csi', 'remove_csi', ({(73, 37, 73, 47): 'log_record'}, {}), '(log_record)', False, 'from NewLifeUtils.StringUtilModule import remove_csi\n'), ((19, 19, 19, 33), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((62, 41, 62, 49), 'NewLifeUtils.ColorModule.MCC.up', 'MCC.up', ({}, {}), '()', False, 'from NewLifeUtils.ColorModule import ACC, MCC\n'), ((59, 35, 59, 56), 'NewLifeUtils.UtilsModule.hex_to_rgb', 'hex_to_rgb', ({(59, 46, 59, 55): 'reader_bg'}, {}), '(reader_bg)', False, 'from NewLifeUtils.UtilsModule import hex_to_rgb\n'), ((59, 74, 59, 95), 'NewLifeUtils.UtilsModule.hex_to_rgb', 'hex_to_rgb', ({(59, 85, 59, 94): 'reader_fg'}, {}), '(reader_fg)', False, 'from NewLifeUtils.UtilsModule import hex_to_rgb\n'), ((32, 38, 32, 53), 'NewLifeUtils.UtilsModule.hex_to_rgb', 'hex_to_rgb', ({(32, 49, 32, 52): 'key'}, {}), '(key)', False, 'from NewLifeUtils.UtilsModule import hex_to_rgb\n')] |
elcolie/battleship | config/api_urls.py | 71b0a963c5b24ae243a193749813fec321d5f4d8 | from rest_framework import routers
from boards.api.viewsets import BoardViewSet
from fleets.api.viewsets import FleetViewSet
from missiles.api.viewsets import MissileViewSet
app_name = 'api'
router = routers.DefaultRouter()
router.register(r'boards', BoardViewSet, base_name='board')
router.register(r'fleets', FleetViewSet, base_name='fleet')
router.register(r'missiles', MissileViewSet, base_name='missile')
urlpatterns = router.urls
| [((8, 9, 8, 32), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ({}, {}), '()', False, 'from rest_framework import routers\n')] |
Alhassan20/mealpy | mealpy/evolutionary_based/MA.py | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 14:22, 11/04/2020 %
# %
# Email: [email protected] %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import time
import numpy as np
from mealpy.optimizer import Optimizer
class BaseMA(Optimizer):
"""
The original version of: Memetic Algorithm (MA)
(On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms)
Link:
Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA)
http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html
"""
ID_POS = 0
ID_FIT = 1
ID_BIT = 2
def __init__(self, problem: dict, epoch=1000, pop_size=100, pc=0.98, pm=0.025, p_local=0.5, max_local_gens=10, bits_per_param=16):
"""
Args:
problem (dict): a dictionary of your problem
epoch (int): maximum number of iterations, default = 1000
pop_size (int): number of population size, default = 100
pc (float): cross-over probability, default = 0.95
pm (float): mutation probability, default = 0.025
p_local ():
max_local_gens ():
bits_per_param ():
"""
super().__init__(problem)
self.epoch = epoch
self.pop_size = pop_size
self.pc = pc
self.pm = pm
self.p_local = p_local
self.max_local_gens = max_local_gens
self.bits_per_param = bits_per_param
self.bits_total = self.problem_size * self.bits_per_param
def create_solution(self):
position = np.random.uniform(self.lb, self.ub)
fitness = self.get_fitness_position(position=position)
bitstring = ''.join(["1" if np.random.uniform() < 0.5 else "0" for _ in range(0, self.bits_total)])
return [position, fitness, bitstring]
def _decode__(self, bitstring=None):
"""
Decode the random bitstring into real number
Args:
bitstring (str): "11000000100101000101010" - bits_per_param = 16, 32 bit for 2 variable. eg. x1 and x2
Returns:
list of real number (vector)
"""
vector = np.ones(self.problem_size)
for idx in range(0, self.problem_size):
param = bitstring[idx * self.bits_per_param: (idx + 1) * self.bits_per_param] # Select 16 bit every time
vector[idx] = self.lb[idx] + ((self.ub[idx] - self.lb[idx]) / ((2.0 ** self.bits_per_param) - 1)) * int(param, 2)
return vector
def _crossover__(self, dad=None, mom=None):
if np.random.uniform() >= self.pc:
temp = [dad].copy()
return temp[0]
else:
child = ""
for idx in range(0, self.bits_total):
if np.random.uniform() < 0.5:
child += dad[idx]
else:
child += mom[idx]
return child
def _point_mutation__(self, bitstring=None):
child = ""
for bit in bitstring:
if np.random.uniform() < self.pc:
child += "0" if bit == "1" else "1"
else:
child += bit
return child
def create_next_generation(self, pop: list):
## Binary tournament
children = [self.get_solution_kway_tournament_selection(pop, k_way=2, output=1)[0] for _ in range(self.pop_size)]
## Reproduction
for idx in range(0, self.pop_size):
ancient = pop[idx + 1] if idx % 2 == 0 else pop[idx - 1]
if idx == self.pop_size - 1:
ancient = pop[0]
bitstring_new = self._crossover__(pop[idx][self.ID_BIT], ancient[self.ID_BIT])
bitstring_new = self._point_mutation__(bitstring_new)
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
children[idx] = [pos_new, fit_new, bitstring_new]
return children
def _bits_climber__(self, child=None):
current = child.copy()
for idx in range(0, self.max_local_gens):
child = current.copy()
bitstring_new = self._point_mutation__(child[self.ID_BIT])
pos_new = self._decode__(bitstring_new)
fit_new = self.get_fitness_position(pos_new)
current = self.get_better_solution(child, [pos_new, fit_new, bitstring_new])
return current
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_global_best_solution(pop)
self.history_list_g_best = [g_best]
self.history_list_c_best = self.history_list_g_best.copy()
for epoch in range(0, self.epoch):
time_start = time.time()
# Create next generations
pop = self.create_next_generation(pop)
# Searching in local
for i in range(0, self.pop_size):
if np.random.uniform() < self.p_local:
pop[i] = self._bits_climber__(pop[i])
# Sort the population and update the global best solution
pop = self.update_global_best_solution(pop)
## Additional information for the framework
time_start = time.time() - time_start
self.history_list_epoch_time.append(time_start)
self.print_epoch(epoch + 1, time_start)
self.history_list_pop.append(pop.copy())
## Additional information for the framework
self.solution = self.history_list_g_best[-1]
self.save_data()
return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]
| [((50, 19, 50, 54), 'numpy.random.uniform', 'np.random.uniform', ({(50, 37, 50, 44): 'self.lb', (50, 46, 50, 53): 'self.ub'}, {}), '(self.lb, self.ub)', True, 'import numpy as np\n'), ((64, 17, 64, 43), 'numpy.ones', 'np.ones', ({(64, 25, 64, 42): 'self.problem_size'}, {}), '(self.problem_size)', True, 'import numpy as np\n'), ((71, 11, 71, 30), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((125, 25, 125, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((86, 15, 86, 34), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((139, 25, 139, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((77, 19, 77, 38), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((132, 19, 132, 38), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n'), ((52, 36, 52, 55), 'numpy.random.uniform', 'np.random.uniform', ({}, {}), '()', True, 'import numpy as np\n')] |
OpenSourceEconomics/estimagic | src/estimagic/estimation/estimate_ml.py | 85163b4cdc601d60d654c6ca1f42b9db17a130a3 | from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options
def estimate_ml(
loglike,
params,
optimize_options,
*,
constraints=None,
logging=False,
log_options=None,
loglike_kwargs=None,
derivative=None,
derivative_kwargs=None,
loglike_and_derivative=None,
loglike_and_derivative_kwargs=None,
numdiff_options=None,
jacobian=None,
jacobian_kwargs=None,
hessian=False,
hessian_kwargs=None,
ci_level=0.95,
n_samples=10_000,
bounds_handling="raise",
design_info=None,
):
"""Do a maximum likelihood (ml) estimation.
This is a high level interface of our lower level functions for maximization,
numerical differentiation and inference. It does the full workflow for maximum
likelihood estimation with just one function call.
While we have good defaults, you can still configure each aspect of each step
via the optional arguments of this function. If you find it easier to do the
"difficult" steps (mainly maximization and calculating numerical derivatives
of a potentially noisy function) separately, you can do so and just provide those
results as ``params``, ``jacobian`` and ``hessian``.
The docstring is aspirational and not all options are supported yet.
Args:
loglike (callable): Likelihood function that takes a params DataFrame (and
potentially other keyword arguments) and returns a dictionary that has at
least the entries "value" (a scalar float) and "contributions" (a 1d numpy
array or pandas Series) with the log likelihood contribution per individual.
params (pd.DataFrame): DataFrame where the "value" column contains the
estimated or start parameters of a likelihood model. See :ref:`params` for
details. If the supplied parameters are estimated parameters, set
optimize_options to False.
optimize_options (dict or False): Keyword arguments that govern the numerical
optimization. Valid entries are all arguments of
:func:`~estimagic.optimization.optimize.minimize` except for criterion,
derivative, criterion_and_derivative and params. If you pass False as
optimize_options you signal that ``params`` are already the optimal
parameters and no numerical optimization is needed.
constraints (list): List with constraint dictionaries.
See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
the file extension ``.db``. If the file does not exist, it will be created.
The dashboard can only be used when logging is used.
log_options (dict): Additional keyword arguments to configure the logging.
- "fast_logging": A boolean that determines if "unsafe" settings are used
to speed up write processes to the database. This should only be used for
very short running criterion functions where the main purpose of the log
is a real-time dashboard and it would not be catastrophic to get a
corrupted database in case of a sudden system shutdown. If one evaluation
of the criterion function (and gradient if applicable) takes more than
100 ms, the logging overhead is negligible.
- "if_table_exists": (str) One of "extend", "replace", "raise". What to
do if the tables we want to write to already exist. Default "extend".
- "if_database_exists": (str): One of "extend", "replace", "raise". What to
do if the database we want to write to already exists. Default "extend".
loglike_kwargs (dict): Additional keyword arguments for loglike.
derivative (callable): Function takes params and potentially other keyword
arguments and calculates the first derivative of loglike. It can either
return a numpy array or pandas Series/DataFrame with the derivative or
a dictionary with derivatives of each output of loglike. If loglike
returns a dict but derivative does not, it is your responsibility to
make sure that the correct derivative for the numerical optimizers you are
using is returned.
derivative_kwargs (dict): Additional keyword arguments for loglike.
loglike_and_derivative (callable): Return a tuple consisting of the result
of loglike and the result of derivative. Only use this if you can exploit
synergies in the calculation of loglike and derivative.
loglike_and_derivative_kwargs (dict): Additional keyword arguments for
loglike_and_derivative.
numdiff_options (dict): Keyword arguments for the calculation of numerical
derivatives for the calculation of standard errors. See
:ref:`first_derivative` for details.
jacobian (callable or pandas.DataFrame or False): A function that takes
``params`` and potentially other keyword arguments and returns the jacobian
of loglike["contributions"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. Note that you
only need to pass a Jacobian function if you have a closed form Jacobian but
decided not to return it as part of ``derivative`` (e.g. because you use
a scalar optimizer and can calculate a gradient in a way that is faster
than calculating and summing the Jacobian). If you pass None, a numerical
Jacobian will be calculated. If you pass ``False``, you signal that no
Jacobian should be calculated. Thus, no result that requires the Jacobian
will be calculated.
jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
hessian (callable or pd.DataFrame): A function that takes
``params`` and potentially other keyword arguments and returns the Hessian
of loglike["value"] with respect to the params. Alternatively, you
can pass a pandas.DataFrame with the Hessian at the optimal parameters.
This is only possible if you pass ``optimize_options=False``. If you pass
None, a numerical Hessian will be calculated. If you pass ``False``, you
signal that no Hessian should be calculated. Thus, no result that requires
the Hessian will be calculated.
hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
ci_level (float): Confidence level for the calculation of confidence intervals.
The default is 0.95.
n_samples (int): Number of samples used to transform the covariance matrix of
the internal parameter vector into the covariance matrix of the external
parameters. For background information about internal and external params
see :ref:`implementation_of_constraints`. This is only used if you have
specified constraints.
bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
are handled. If "clip", confidence intervals are clipped at the bounds.
Standard errors are only adjusted if a sampling step is necessary due to
additional constraints. If "raise" and any lower or upper bound is binding,
we raise an Error. If "ignore", boundary problems are simply ignored.
design_info (pandas.DataFrame): DataFrame with one row per observation that
contains some or all of the variables "psu" (primary sampling unit),
"stratum" and "fpc" (finite population corrector). See
:ref:`robust_likelihood_inference` for details.
Returns:
dict: The estimated parameters, standard errors and covariance matrix of the
parameters.
"""
# ==================================================================================
# Check and process inputs
# ==================================================================================
is_optimized = optimize_options is False
check_optimization_options(
optimize_options,
usage="estimate_ml",
algorithm_mandatory=True,
)
jac_case = get_derivative_case(jacobian)
hess_case = get_derivative_case(hessian)
check_is_optimized_and_derivative_case(is_optimized, jac_case)
check_is_optimized_and_derivative_case(is_optimized, hess_case)
cov_cases = _get_cov_cases(jac_case, hess_case, design_info)
check_numdiff_options(numdiff_options, "estimate_ml")
numdiff_options = {} if numdiff_options in (None, False) else numdiff_options
constraints = [] if constraints is None else constraints
processed_constraints, _ = process_constraints(constraints, params)
# ==================================================================================
# Calculate estimates via maximization (if necessary)
# ==================================================================================
if is_optimized:
estimates = params
else:
opt_res = maximize(
criterion=loglike,
criterion_kwargs=loglike_kwargs,
params=params,
constraints=constraints,
derivative=derivative,
derivative_kwargs=derivative_kwargs,
criterion_and_derivative=loglike_and_derivative,
criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
logging=logging,
log_options=log_options,
**optimize_options,
)
estimates = opt_res["solution_params"]
# ==================================================================================
# Calculate internal jacobian
# ==================================================================================
deriv_to_internal = get_derivative_conversion_function(
params=params, constraints=constraints
)
if jac_case == "pre-calculated":
int_jac = deriv_to_internal(jacobian)
elif jac_case == "closed-form":
jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
_jac = jacobian(estimates, **jacobian_kwargs)
int_jac = deriv_to_internal(_jac)
# switch to "numerical" even if jac_case == "skip" because jac is required for ml.
elif jac_case == "numerical":
options = numdiff_options.copy()
options["key"] = "contributions"
deriv_res = get_internal_first_derivative(
func=loglike,
params=estimates,
constraints=constraints,
func_kwargs=loglike_kwargs,
numdiff_options=options,
)
int_jac = deriv_res["derivative"]
jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
else:
int_jac = None
# ==================================================================================
# Calculate internal Hessian (most of this is not yet implemented)
# ==================================================================================
if hess_case == "skip":
int_hess = None
elif hess_case == "numerical":
raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
hess_numdiff_info = {}
elif hess_case in ("closed-form", "pre-calculated") and constraints:
raise NotImplementedError(
"Closed-form or pre-calculated Hessians are not yet compatible with "
"constraints."
)
else:
int_hess = hessian(estimates, **hessian_kwargs)
# ==================================================================================
# Calculate all available internal cov types
# ==================================================================================
int_covs = {}
if "jacobian" in cov_cases:
int_covs["cov_jacobian"] = cov_jacobian(int_jac)
if "hessian" in cov_cases:
int_covs["cov_hessian"] = cov_hessian(int_hess)
if "robust" in cov_cases:
int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
if "cluster_robust" in cov_cases:
int_covs["cov_cluster_robust"] = cov_cluster_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
if "strata_robust" in cov_cases:
int_covs["cov_strata_robust"] = cov_strata_robust(
jac=int_jac, hess=int_hess, design_info=design_info
)
# ==================================================================================
# Calculate all available external covs and summaries
# ==================================================================================
covs = {}
summaries = {}
for case in cov_cases:
cov = transform_covariance(
params=estimates,
internal_cov=int_covs[f"cov_{case}"],
constraints=constraints,
n_samples=n_samples,
bounds_handling=bounds_handling,
)
summary = calculate_inference_quantities(
params=estimates,
free_cov=cov,
ci_level=ci_level,
)
covs[f"cov_{case}"] = cov
summaries[f"summary_{case}"] = summary
# ==================================================================================
# Calculate external jac and hess (if no transforming constraints)
# ==================================================================================
if not processed_constraints:
ext_jac = int_jac
ext_hess = int_hess
else:
ext_jac = "No external Jacobian defined due to constraints."
ext_hess = "No external Hessian defined due to constraints."
# ==================================================================================
# Construct output
# ==================================================================================
out = {
**summaries,
**covs,
"jacobian": ext_jac,
"hessian": ext_hess,
}
if not is_optimized:
out["optimize_res"] = opt_res
if jac_case == "numerical":
out["jacobian_numdiff_info"] = jac_numdiff_info
if hess_case == "numerical":
out["hessian_numdiff_info"] = hess_numdiff_info
return out
def _get_cov_cases(jac_case, hess_case, design_info):
if jac_case == "skip" and hess_case == "skip":
raise ValueError("Jacobian and Hessian cannot both be False.")
elif jac_case == "skip" and hess_case != "skip":
cases = ["hessian"]
elif hess_case == "skip" and jac_case != "skip":
cases = ["jacobian"]
else:
cases = ["jacobian", "hessian", "robust"]
if design_info is not None:
if "psu" in design_info:
cases.append("cluster_robust")
if {"strata", "psu", "fpc"}.issubset(design_info):
cases.append("strata_robust")
return cases
| [((153, 4, 157, 5), 'estimagic.shared.check_option_dicts.check_optimization_options', 'check_optimization_options', (), '', False, 'from estimagic.shared.check_option_dicts import check_optimization_options\n'), ((159, 15, 159, 44), 'estimagic.inference.shared.get_derivative_case', 'get_derivative_case', ({(159, 35, 159, 43): 'jacobian'}, {}), '(jacobian)', False, 'from estimagic.inference.shared import get_derivative_case\n'), ((160, 16, 160, 44), 'estimagic.inference.shared.get_derivative_case', 'get_derivative_case', ({(160, 36, 160, 43): 'hessian'}, {}), '(hessian)', False, 'from estimagic.inference.shared import get_derivative_case\n'), ((162, 4, 162, 66), 'estimagic.inference.shared.check_is_optimized_and_derivative_case', 'check_is_optimized_and_derivative_case', ({(162, 43, 162, 55): 'is_optimized', (162, 57, 162, 65): 'jac_case'}, {}), '(is_optimized, jac_case)', False, 'from estimagic.inference.shared import check_is_optimized_and_derivative_case\n'), ((163, 4, 163, 67), 'estimagic.inference.shared.check_is_optimized_and_derivative_case', 'check_is_optimized_and_derivative_case', ({(163, 43, 163, 55): 'is_optimized', (163, 57, 163, 66): 'hess_case'}, {}), '(is_optimized, hess_case)', False, 'from estimagic.inference.shared import check_is_optimized_and_derivative_case\n'), ((167, 4, 167, 57), 'estimagic.shared.check_option_dicts.check_numdiff_options', 'check_numdiff_options', ({(167, 26, 167, 41): 'numdiff_options', (167, 43, 167, 56): '"""estimate_ml"""'}, {}), "(numdiff_options, 'estimate_ml')", False, 'from estimagic.shared.check_option_dicts import check_numdiff_options\n'), ((172, 31, 172, 71), 'estimagic.parameters.process_constraints.process_constraints', 'process_constraints', ({(172, 51, 172, 62): 'constraints', (172, 64, 172, 70): 'params'}, {}), '(constraints, params)', False, 'from estimagic.parameters.process_constraints import process_constraints\n'), ((200, 24, 202, 5), 'estimagic.parameters.parameter_conversion.get_derivative_conversion_function', 'get_derivative_conversion_function', (), '', False, 'from estimagic.parameters.parameter_conversion import get_derivative_conversion_function\n'), ((181, 18, 193, 9), 'estimagic.optimization.optimize.maximize', 'maximize', (), '', False, 'from estimagic.optimization.optimize import maximize\n'), ((249, 35, 249, 56), 'estimagic.inference.ml_covs.cov_jacobian', 'cov_jacobian', ({(249, 48, 249, 55): 'int_jac'}, {}), '(int_jac)', False, 'from estimagic.inference.ml_covs import cov_jacobian\n'), ((251, 34, 251, 55), 'estimagic.inference.ml_covs.cov_hessian', 'cov_hessian', ({(251, 46, 251, 54): 'int_hess'}, {}), '(int_hess)', False, 'from estimagic.inference.ml_covs import cov_hessian\n'), ((253, 33, 253, 71), 'estimagic.inference.ml_covs.cov_robust', 'cov_robust', (), '', False, 'from estimagic.inference.ml_covs import cov_robust\n'), ((255, 41, 257, 9), 'estimagic.inference.ml_covs.cov_cluster_robust', 'cov_cluster_robust', (), '', False, 'from estimagic.inference.ml_covs import cov_cluster_robust\n'), ((259, 40, 261, 9), 'estimagic.inference.ml_covs.cov_strata_robust', 'cov_strata_robust', (), '', False, 'from estimagic.inference.ml_covs import cov_strata_robust\n'), ((270, 14, 276, 9), 'estimagic.inference.shared.transform_covariance', 'transform_covariance', (), '', False, 'from estimagic.inference.shared import transform_covariance\n'), ((277, 18, 281, 9), 'estimagic.inference.shared.calculate_inference_quantities', 'calculate_inference_quantities', (), '', False, 'from estimagic.inference.shared import 
calculate_inference_quantities\n'), ((214, 20, 220, 9), 'estimagic.inference.shared.get_internal_first_derivative', 'get_internal_first_derivative', (), '', False, 'from estimagic.inference.shared import get_internal_first_derivative\n')] |
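A hedged usage sketch of estimate_ml as documented in the docstring above. The normal-model log likelihood, the algorithm name "scipy_lbfgsb" and the import path (inferred from the file location) are assumptions for illustration and are not taken from this file.

import numpy as np
import pandas as pd
from scipy import stats
from estimagic.estimation.estimate_ml import estimate_ml  # import path assumed from the repo layout

rng = np.random.default_rng(0)
data = rng.normal(loc=1.0, scale=2.0, size=500)

def loglike(params):
    # loglike must return at least "value" and "contributions" (see the docstring above)
    contribs = stats.norm.logpdf(
        data, loc=params.loc["mean", "value"], scale=params.loc["sd", "value"]
    )
    return {"contributions": contribs, "value": contribs.sum()}

start_params = pd.DataFrame({"value": [0.0, 2.0]}, index=["mean", "sd"])

results = estimate_ml(
    loglike=loglike,
    params=start_params,
    optimize_options={"algorithm": "scipy_lbfgsb"},  # algorithm name is an assumption
)
print(results["summary_jacobian"])  # the default cov case when hessian=False and jacobian=None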
NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a | neural_architecture_search_appendix_a.py | 67e4876d428e5155f5526ee02875b0a89a52305d | import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict
class Conv(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.conv(x)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class Conv_ReLU_BN(nutszebra_chainer.Model):
def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
super(Conv_ReLU_BN, self).__init__(
conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
bn=L.BatchNormalization(out_channel),
)
def weight_initialization(self):
self.conv.W.data = self.weight_relu_initialization(self.conv)
self.conv.b.data = self.bias_initialization(self.conv, constant=0)
def __call__(self, x, train=False):
return self.bn(F.relu(self.conv(x)), test=not train)
def count_parameters(self):
return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)
class AppendixA(nutszebra_chainer.Model):
def __init__(self, category_num):
super(AppendixA, self).__init__()
out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
modules = []
in_channel = 3
for i in six.moves.range(len(out_channels)):
modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
# register layers
[self.add_link(*link) for link in modules]
self.modules = modules
self.category_num = category_num
self.out_channels = out_channels
self.skip_connections = skip_connections
self.filters = filters
self.name = 'appndix_a_{}'.format(category_num)
def weight_initialization(self):
[link.weight_initialization() for _, link in self.modules]
def count_parameters(self):
return int(np.sum([link.count_parameters() for _, link in self.modules]))
@staticmethod
def _zero_pads(x, pad, axis):
if type(x.data) is not np.ndarray:
pad.to_gpu()
return F.concat((x, pad), axis=axis)
@staticmethod
def zero_pads(x, sizes):
batch, channel, height, width = x.data.shape
diff_height = sizes[2] - height
diff_width = sizes[3] - width
# pad along with height
if diff_height >= 1:
pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=2)
_, _, height, _ = x.data.shape
# pad along with width
if diff_width >= 1:
pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
x = AppendixA._zero_pads(x, pad, axis=3)
return x
@staticmethod
def _max(a, b):
return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
@staticmethod
def concatenate(X):
sizes = (0, 0, 0, 0)
for x in X:
sizes = AppendixA._max(sizes, x.data.shape)
X = [AppendixA.zero_pads(x, sizes) for x in X]
return F.concat(X, axis=1)
def __call__(self, x, train=False):
x = [x]
outputs = []
for i in six.moves.range(len(self.out_channels)):
x = self['conv{}'.format(i)](self.concatenate(x), train=train)
outputs.append(x)
x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
x = outputs[-1]
batch, channels, height, width = x.data.shape
x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
return F.reshape(self.linear(x, train), (batch, self.category_num))
def calc_loss(self, y, t):
loss = F.softmax_cross_entropy(y, t)
return loss
def accuracy(self, y, t, xp=np):
y.to_cpu()
t.to_cpu()
indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
accuracy = defaultdict(int)
for i in indices:
accuracy[t.data[i]] += 1
indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
false_accuracy = defaultdict(int)
false_y = np.argmax(y.data, axis=1)
for i in indices:
false_accuracy[(t.data[i], false_y[i])] += 1
return accuracy, false_accuracy
| [((26, 15, 26, 75), 'functools.reduce', 'functools.reduce', ({(26, 32, 26, 50): '(lambda a, b: a * b)', (26, 52, 26, 74): 'self.conv.W.data.shape'}, {}), '(lambda a, b: a * b, self.conv.W.data.shape)', False, 'import functools\n'), ((45, 15, 45, 75), 'functools.reduce', 'functools.reduce', ({(45, 32, 45, 50): '(lambda a, b: a * b)', (45, 52, 45, 74): 'self.conv.W.data.shape'}, {}), '(lambda a, b: a * b, self.conv.W.data.shape)', False, 'import functools\n'), ((96, 15, 96, 44), 'chainer.functions.concat', 'F.concat', (), '', True, 'import chainer.functions as F\n'), ((123, 15, 123, 34), 'chainer.functions.concat', 'F.concat', (), '', True, 'import chainer.functions as F\n'), ((138, 15, 138, 44), 'chainer.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', ({(138, 39, 138, 40): 'y', (138, 42, 138, 43): 't'}, {}), '(y, t)', True, 'import chainer.functions as F\n'), ((145, 19, 145, 35), 'collections.defaultdict', 'defaultdict', ({(145, 31, 145, 34): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n'), ((149, 25, 149, 41), 'collections.defaultdict', 'defaultdict', ({(149, 37, 149, 40): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n'), ((150, 18, 150, 43), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((134, 22, 134, 62), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', ({(134, 43, 134, 44): 'x', (134, 46, 134, 61): '(height, width)'}, {}), '(x, (height, width))', True, 'import chainer.functions as F\n'), ((15, 17, 15, 83), 'chainer.links.Convolution2D', 'L.Convolution2D', ({(15, 33, 15, 43): 'in_channel', (15, 45, 15, 56): 'out_channel', (15, 58, 15, 69): 'filter_size', (15, 71, 15, 77): 'stride', (15, 79, 15, 82): 'pad'}, {}), '(in_channel, out_channel, filter_size, stride, pad)', True, 'import chainer.links as L\n'), ((33, 17, 33, 83), 'chainer.links.Convolution2D', 'L.Convolution2D', ({(33, 33, 33, 43): 'in_channel', (33, 45, 33, 56): 'out_channel', (33, 58, 33, 69): 'filter_size', (33, 71, 33, 77): 'stride', (33, 79, 33, 82): 'pad'}, {}), '(in_channel, out_channel, filter_size, stride, pad)', True, 'import chainer.links as L\n'), ((34, 15, 34, 48), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ({(34, 36, 34, 47): 'out_channel'}, {}), '(out_channel)', True, 'import chainer.links as L\n'), ((105, 35, 105, 96), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((110, 35, 110, 96), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((144, 38, 144, 63), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((148, 38, 148, 63), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n')] |
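A plain-NumPy sketch of what zero_pads() and concatenate() above do: feature maps are padded at the bottom/right up to a common spatial size and then joined along the channel axis. The shapes below are made up for illustration.

import numpy as np

a = np.zeros((1, 36, 32, 32))  # (batch, channels, height, width)
b = np.zeros((1, 48, 30, 30))  # spatially smaller map, e.g. after an unpadded conv
h = max(a.shape[2], b.shape[2])
w = max(a.shape[3], b.shape[3])

def zero_pad(x, h, w):
    # pad with zeros on the bottom and right, mirroring AppendixA.zero_pads
    return np.pad(x, ((0, 0), (0, 0), (0, h - x.shape[2]), (0, w - x.shape[3])))

merged = np.concatenate([zero_pad(a, h, w), zero_pad(b, h, w)], axis=1)
print(merged.shape)  # (1, 84, 32, 32)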
quizlet/abracadabra | test/test_proportions_delta.py | eda599bd02f14b96efdc521f53132d93c9100ede | import pytest
from abra import Experiment, HypothesisTest
def test_large_proportions_delta_expermiment(proportions_data_large):
exp = Experiment(proportions_data_large, name='proportions-test')
# run 'A/A' test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 'z'
assert not results_aa.accept_hypothesis
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='B',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_unequal(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='unequal',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.test_statistic == 'z'
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_larger(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='larger',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert results_ab.accept_hypothesis
def test_proportions_delta_ab_smaller(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/B test
test_ab = HypothesisTest(
metric='metric',
control='A', variation='F',
hypothesis='smaller',
inference_method='proportions_delta'
)
results_ab = exp.run_test(test_ab)
assert not results_ab.accept_hypothesis
def test_proportions_delta_aa(proportions_data_small):
exp = Experiment(proportions_data_small, name='proportions-test')
# run A/A test
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='larger',
inference_method='proportions_delta'
)
results_aa = exp.run_test(test_aa)
assert not results_aa.accept_hypothesis
def test_proportions_delta_experiment_t(proportions_data_small):
"""Small sample sizes defautl to t-tests"""
exp = Experiment(proportions_data_small.sample(29), name='proportions-test')
test_aa = HypothesisTest(
metric='metric',
control='A', variation='A',
hypothesis='unequal',
inference_method='means_delta'
)
results_aa = exp.run_test(test_aa)
assert results_aa.test_statistic == 't' | [((6, 10, 6, 69), 'abra.Experiment', 'Experiment', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((9, 14, 14, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((21, 14, 26, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((34, 10, 34, 69), 'abra.Experiment', 'Experiment', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((37, 14, 42, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((49, 10, 49, 69), 'abra.Experiment', 'Experiment', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((52, 14, 57, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((62, 10, 62, 69), 'abra.Experiment', 'Experiment', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((65, 14, 70, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((76, 10, 76, 69), 'abra.Experiment', 'Experiment', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((79, 14, 84, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n'), ((93, 14, 98, 5), 'abra.HypothesisTest', 'HypothesisTest', (), '', False, 'from abra import Experiment, HypothesisTest\n')] |
Build-The-Web/bootils | src/bootils/plugins/core/jsw.py | 8ee88f4d0583352f58fbb89c018e7caef8f07ce3 | # -*- coding: utf-8 -*-
# pylint: disable=
""" Tanuki Java Service Wrapper runtime environment.
Debian JSW paths (Wheezy 3.5.3; Jessie 3.5.22)::
/usr/sbin/wrapper – ELF executable
/usr/share/wrapper/daemon.sh
/usr/share/wrapper/make-wrapper-init.sh
/usr/share/wrapper/wrapper.conf
"""
# Copyright © 2015 1&1 Group <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
from ..loader import PluginBase
class JavaServiceWrapper(PluginBase):
"""Tanuki Java Service Wrapper runtime environment."""
def control_start(self, *args, **options):
"""Start a Java service."""
print("*** JSW START ***")
return True # TODO: actually implement this
def control_stop(self, *args, **options):
"""Stop a Java service."""
return False # TODO: actually implement this
| [] |
ChoKyuWon/SchoolProjects | MachineLearning/hw1/models/LinearRegression.py | 71a5decefc85ae941ba2d537c4507ba8e615cc34 | import numpy as np
class LinearRegression:
def __init__(self, num_features):
self.num_features = num_features
self.W = np.zeros((self.num_features, 1))
def train(self, x, y, epochs, batch_size, lr, optim):
final_loss = None # loss of final epoch
# Training should be done for 'epochs' times with minibatch size of 'batch_size'
# The function 'train' should return the loss of final epoch
# Loss of an epoch is calculated as an average of minibatch losses
# ========================= EDIT HERE ========================
# Each row xline is paired with the n-th target y: f(xline) = y_i
final_loss=0
num_data=len(y)
k=0
def dlossF(k, j):
s=0
size = batch_size
for Xi, Yi in zip(x[k:k+batch_size], y[k:k+batch_size]):
fx=np.transpose(Xi).dot(self.W)
s = s + (fx-Yi)*Xi[j]
if (num_data - k) < batch_size:
size = num_data - k
return s/size
for iterative in range(0, epochs):
k = k + batch_size
if k == num_data:
k = batch_size
grad = np.zeros((self.num_features, 1))
for j in range(0, self.num_features):
grad[j] = dlossF(k, j)
self.W = optim.update(self.W, grad, lr)
# ============================================================
return final_loss
def eval(self, x):
pred = None
# Evaluation Function
# Given the input 'x', the function should return prediction for 'x'
# ========================= EDIT HERE ========================
ylist=[]
for xline in x:
y = np.transpose(xline).dot(self.W)
ylist.append(y[0])
pred = np.array(ylist)
# ============================================================
return pred
| [((6, 17, 6, 49), 'numpy.zeros', 'np.zeros', ({(6, 26, 6, 48): '(self.num_features, 1)'}, {}), '((self.num_features, 1))', True, 'import numpy as np\n'), ((56, 15, 56, 30), 'numpy.array', 'np.array', ({(56, 24, 56, 29): 'ylist'}, {}), '(ylist)', True, 'import numpy as np\n'), ((36, 19, 36, 51), 'numpy.zeros', 'np.zeros', ({(36, 28, 36, 50): '(self.num_features, 1)'}, {}), '((self.num_features, 1))', True, 'import numpy as np\n'), ((54, 16, 54, 35), 'numpy.transpose', 'np.transpose', ({(54, 29, 54, 34): 'xline'}, {}), '(xline)', True, 'import numpy as np\n'), ((26, 19, 26, 35), 'numpy.transpose', 'np.transpose', ({(26, 32, 26, 34): 'Xi'}, {}), '(Xi)', True, 'import numpy as np\n')] |
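A hedged usage sketch of the class above on synthetic data. The SGD optimizer stub is an assumed stand-in for the course-provided optimizer and is inferred only from the optim.update(W, grad, lr) call in train().

import numpy as np

class SGD:
    def update(self, w, grad, lr):
        return w - lr * grad

rng = np.random.RandomState(0)
x = np.hstack([np.ones((100, 1)), rng.rand(100, 1)])  # bias column plus one feature
y = x.dot(np.array([[2.0], [3.0]])).ravel()           # targets from known weights

model = LinearRegression(num_features=2)
model.train(x, y, epochs=500, batch_size=10, lr=0.1, optim=SGD())
print(model.eval(x)[:5])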
aurmeneta/ramos-uc | apps/bc_scraper/actions/schedule.py | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | from copy import copy
DEFAULT_SCHEDULE = {}
for day in "lmwjvs":
for mod in "12345678":
DEFAULT_SCHEDULE[day + mod] = "'FREE'"
def process_schedule(text_sc):
"""For a given schedule text in BC format, returns the SQL queries for inserting
the full schedule and the schedule info. The returned queries still need the section id formatted into their %s placeholder.
"""
### Full Schedule
data = text_sc.split("\nROW: ")[1:]
# data rows -> day-day:module,module <> type <> room <><>
schedule = copy(DEFAULT_SCHEDULE)
for row in data:
row = row.split("<>")[:2]
horario = row[0].split(":")
days = horario[0].split("-")
modules = horario[1].split(",")
for day in days:
for mod in modules:
if len(day) and len(mod):
schedule[day.lower() + mod] = "'" + row[1] + "'"
cols = ",".join(schedule.keys())
values = ",".join(schedule.values())
full_sc_query = (
f"INSERT INTO courses_fullschedule (section_id, {cols}) VALUES (%s, {values});"
)
### Info Schedule
schedule_info = {"total": 0}
for type in ["AYU", "CLAS", "LAB", "PRA", "SUP", "TAL", "TER", "TES"]:
schedule_info[type] = list(schedule.values()).count("'" + type + "'")
schedule_info["total"] += schedule_info[type]
schedule_info[type] = str(schedule_info[type])
schedule_info["total"] = str(schedule_info["total"])
cols = ",".join(schedule_info.keys())
values = ",".join(schedule_info.values())
info_sc_query = (
f"INSERT INTO courses_scheduleinfo (section_id, {cols}) VALUES (%s, {values});"
)
return full_sc_query, info_sc_query
| [((17, 15, 17, 37), 'copy.copy', 'copy', ({(17, 20, 17, 36): 'DEFAULT_SCHEDULE'}, {}), '(DEFAULT_SCHEDULE)', False, 'from copy import copy\n')] |
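A hedged example of the input format this parser expects, reverse-engineered from the code above; the header text, room names and the section id 42 are made up.

text_sc = (
    "SOME HEADER"
    "\nROW: L-W:1,2<>CLAS<>A1<><>"
    "\nROW: J:4<>AYU<>B12<><>"
)
full_sc_query, info_sc_query = process_schedule(text_sc)
# full_sc_query fills l1, l2, w1, w2 with 'CLAS' and j4 with 'AYU'; every other slot stays 'FREE'
# info_sc_query counts CLAS=4, AYU=1, total=5
print(full_sc_query % 42)
print(info_sc_query % 42)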
NunoEdgarGFlowHub/horizon | openstack_dashboard/dashboards/admin/volumes/views.py | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes and snapshots.
"""
from collections import OrderedDict
from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
tables.DataTableView):
table_class = volumes_tables.VolumesTable
page_title = _("Volumes")
FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
'encrypted': {_('yes'): True, _('no'): False}}
def get_data(self):
default_filters = {'all_tenants': True}
filters = self.get_filters(default_filters.copy())
filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
volumes = []
self.table.needs_filter_first = False
if filter_first.get('admin.volumes', False) and \
len(filters) == len(default_filters):
self.table.needs_filter_first = True
return volumes
if 'project' in filters:
# Keystone returns a tuple ([],false) where the first element is
# tenant list that's why the 0 is hardcoded below
tenants = keystone.tenant_list(self.request)[0]
tenant_ids = [t.id for t in tenants
if t.name == filters['project']]
if not tenant_ids:
return []
del filters['project']
for id in tenant_ids:
filters['project_id'] = id
volumes += self._get_volumes(search_opts=filters)
else:
volumes = self._get_volumes(search_opts=filters)
attached_instance_ids = self._get_attached_instance_ids(volumes)
instances = self._get_instances(search_opts={'all_tenants': True},
instance_ids=attached_instance_ids)
volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
search_opts={'all_tenants': True})
self._set_volume_attributes(
volumes, instances, volume_ids_with_snapshots)
# Gather our tenants to correlate against IDs
try:
tenants, has_more = keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve volume project information.')
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t) for t in tenants])
for volume in volumes:
tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
tenant = tenant_dict.get(tenant_id, None)
volume.tenant_name = getattr(tenant, "name", None)
return volumes
def get_filters(self, filters):
self.table = self._tables['volumes']
self.handle_server_filter(self.request, table=self.table)
self.update_server_filter_action(self.request, table=self.table)
filters = super(VolumesView, self).get_filters(filters,
self.FILTERS_MAPPING)
return filters
class DetailView(volumes_views.DetailView):
tab_group_class = volumes_tabs.VolumeDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
table = volumes_tables.VolumesTable(self.request)
context["actions"] = table.render_row_actions(context["volume"])
return context
def get_search_opts(self, volume):
search_opts = super(DetailView, self).get_search_opts(volume)
search_opts['all_tenants'] = True
return search_opts
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
class ManageVolumeView(forms.ModalFormView):
form_class = volumes_forms.ManageVolume
template_name = 'admin/volumes/manage_volume.html'
form_id = "manage_volume_modal"
submit_label = _("Manage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = reverse_lazy('horizon:admin:volumes:manage')
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Manage Volume")
def get_context_data(self, **kwargs):
context = super(ManageVolumeView, self).get_context_data(**kwargs)
return context
class UnmanageVolumeView(forms.ModalFormView):
form_class = volumes_forms.UnmanageVolume
template_name = 'admin/volumes/unmanage_volume.html'
form_id = "unmanage_volume_modal"
submit_label = _("Unmanage")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:unmanage'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Unmanage Volume")
def get_context_data(self, **kwargs):
context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'host': getattr(volume, "os-vol-host-attr:host")}
class MigrateVolumeView(forms.ModalFormView):
form_class = volumes_forms.MigrateVolume
template_name = 'admin/volumes/migrate_volume.html'
form_id = "migrate_volume_modal"
submit_label = _("Migrate")
success_url = reverse_lazy('horizon:admin:volumes:index')
submit_url = 'horizon:admin:volumes:migrate'
cancel_url = reverse_lazy("horizon:admin:volumes:index")
page_title = _("Migrate Volume")
def get_context_data(self, **kwargs):
context = super(MigrateVolumeView, self).get_context_data(**kwargs)
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
@memoized.memoized_method
def get_hosts(self):
try:
return cinder.pool_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve pools information.'),
redirect=self.success_url)
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'current_host': getattr(volume, "os-vol-host-attr:host"),
'hosts': self.get_hosts()}
class UpdateStatusView(forms.ModalFormView):
form_class = volumes_forms.UpdateStatus
modal_id = "update_volume_status_modal"
template_name = 'admin/volumes/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:volumes:update_status"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Update Volume Status")
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
| [((45, 17, 45, 29), 'django.utils.translation.ugettext_lazy', '_', ({(45, 19, 45, 28): '"""Volumes"""'}, {}), "('Volumes')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((134, 19, 134, 30), 'django.utils.translation.ugettext_lazy', '_', ({(134, 21, 134, 29): '"""Manage"""'}, {}), "('Manage')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((135, 18, 135, 61), 'django.urls.reverse_lazy', 'reverse_lazy', ({(135, 31, 135, 60): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((136, 17, 136, 61), 'django.urls.reverse_lazy', 'reverse_lazy', ({(136, 30, 136, 60): '"""horizon:admin:volumes:manage"""'}, {}), "('horizon:admin:volumes:manage')", False, 'from django.urls import reverse_lazy\n'), ((137, 17, 137, 60), 'django.urls.reverse_lazy', 'reverse_lazy', ({(137, 30, 137, 59): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((138, 17, 138, 35), 'django.utils.translation.ugettext_lazy', '_', ({(138, 19, 138, 34): '"""Manage Volume"""'}, {}), "('Manage Volume')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((149, 19, 149, 32), 'django.utils.translation.ugettext_lazy', '_', ({(149, 21, 149, 31): '"""Unmanage"""'}, {}), "('Unmanage')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((150, 18, 150, 61), 'django.urls.reverse_lazy', 'reverse_lazy', ({(150, 31, 150, 60): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((152, 17, 152, 60), 'django.urls.reverse_lazy', 'reverse_lazy', ({(152, 30, 152, 59): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((153, 17, 153, 37), 'django.utils.translation.ugettext_lazy', '_', ({(153, 19, 153, 36): '"""Unmanage Volume"""'}, {}), "('Unmanage Volume')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((183, 19, 183, 31), 'django.utils.translation.ugettext_lazy', '_', ({(183, 21, 183, 30): '"""Migrate"""'}, {}), "('Migrate')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((184, 18, 184, 61), 'django.urls.reverse_lazy', 'reverse_lazy', ({(184, 31, 184, 60): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((186, 17, 186, 60), 'django.urls.reverse_lazy', 'reverse_lazy', ({(186, 30, 186, 59): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((187, 17, 187, 36), 'django.utils.translation.ugettext_lazy', '_', ({(187, 19, 187, 35): '"""Migrate Volume"""'}, {}), "('Migrate Volume')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((227, 19, 227, 37), 'django.utils.translation.ugettext_lazy', '_', ({(227, 21, 227, 36): '"""Update Status"""'}, {}), "('Update Status')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((229, 18, 229, 61), 'django.urls.reverse_lazy', 'reverse_lazy', ({(229, 31, 229, 60): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse_lazy\n'), ((230, 17, 230, 42), 'django.utils.translation.ugettext_lazy', '_', ({(230, 19, 230, 41): '"""Update Volume Status"""'}, {}), "('Update Volume Status')", True, 'from django.utils.translation import ugettext_lazy 
as _\n'), ((95, 22, 95, 63), 'collections.OrderedDict', 'OrderedDict', ({(95, 34, 95, 62): '[(t.id, t) for t in tenants]'}, {}), '([(t.id, t) for t in tenants])', False, 'from collections import OrderedDict\n'), ((117, 16, 117, 57), 'openstack_dashboard.dashboards.admin.volumes.tables.VolumesTable', 'volumes_tables.VolumesTable', ({(117, 44, 117, 56): 'self.request'}, {}), '(self.request)', True, 'from openstack_dashboard.dashboards.admin.volumes import tables as volumes_tables\n'), ((127, 15, 127, 53), 'django.urls.reverse', 'reverse', ({(127, 23, 127, 52): '"""horizon:admin:volumes:index"""'}, {}), "('horizon:admin:volumes:index')", False, 'from django.urls import reverse\n'), ((158, 32, 158, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((192, 32, 192, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((236, 32, 236, 67), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((47, 36, 47, 44), 'django.utils.translation.ugettext_lazy', '_', ({(47, 38, 47, 43): '"""yes"""'}, {}), "('yes')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((47, 54, 47, 61), 'django.utils.translation.ugettext_lazy', '_', ({(47, 56, 47, 60): '"""no"""'}, {}), "('no')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((48, 37, 48, 45), 'django.utils.translation.ugettext_lazy', '_', ({(48, 39, 48, 44): '"""yes"""'}, {}), "('yes')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((48, 53, 48, 60), 'django.utils.translation.ugettext_lazy', '_', ({(48, 55, 48, 59): '"""no"""'}, {}), "('no')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((89, 32, 89, 66), 'openstack_dashboard.api.keystone.tenant_list', 'keystone.tenant_list', ({(89, 53, 89, 65): 'self.request'}, {}), '(self.request)', False, 'from openstack_dashboard.api import keystone\n'), ((165, 21, 165, 63), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', ({(165, 39, 165, 51): 'self.request', (165, 53, 165, 62): 'volume_id'}, {}), '(self.request, volume_id)', False, 'from openstack_dashboard.api import cinder\n'), ((199, 21, 199, 63), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', ({(199, 39, 199, 51): 'self.request', (199, 53, 199, 62): 'volume_id'}, {}), '(self.request, volume_id)', False, 'from openstack_dashboard.api import cinder\n'), ((209, 19, 209, 49), 'openstack_dashboard.api.cinder.pool_list', 'cinder.pool_list', ({(209, 36, 209, 48): 'self.request'}, {}), '(self.request)', False, 'from openstack_dashboard.api import cinder\n'), ((243, 21, 243, 63), 'openstack_dashboard.api.cinder.volume_get', 'cinder.volume_get', ({(243, 39, 243, 51): 'self.request', (243, 53, 243, 62): 'volume_id'}, {}), '(self.request, volume_id)', False, 'from openstack_dashboard.api import cinder\n'), ((67, 22, 67, 56), 'openstack_dashboard.api.keystone.tenant_list', 'keystone.tenant_list', ({(67, 43, 67, 55): 'self.request'}, {}), '(self.request)', False, 'from openstack_dashboard.api import keystone\n'), ((92, 18, 92, 69), 'django.utils.translation.ugettext_lazy', '_', ({(92, 20, 92, 68): '"""Unable to retrieve volume project information."""'}, {}), "('Unable to retrieve volume project information.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((93, 12, 93, 48), 'horizon.exceptions.handle', 'exceptions.handle', ({(93, 30, 93, 42): 'self.request', (93, 44, 93, 47): 'msg'}, {}), '(self.request, msg)', False, 'from 
horizon import exceptions\n'), ((168, 30, 168, 69), 'django.utils.translation.ugettext_lazy', '_', ({(168, 32, 168, 68): '"""Unable to retrieve volume details."""'}, {}), "('Unable to retrieve volume details.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((202, 30, 202, 69), 'django.utils.translation.ugettext_lazy', '_', ({(202, 32, 202, 68): '"""Unable to retrieve volume details."""'}, {}), "('Unable to retrieve volume details.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((212, 30, 212, 72), 'django.utils.translation.ugettext_lazy', '_', ({(212, 32, 212, 71): '"""Unable to retrieve pools information."""'}, {}), "('Unable to retrieve pools information.')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((246, 30, 246, 69), 'django.utils.translation.ugettext_lazy', '_', ({(246, 32, 246, 68): '"""Unable to retrieve volume details."""'}, {}), "('Unable to retrieve volume details.')", True, 'from django.utils.translation import ugettext_lazy as _\n')] |
flowolf/yessssms | tests/__init__.py | 438928967aca38d3d2bb07799d3723757e928553 | """Tests for YesssSMS."""
| [] |
bldr-cmd/bldr-cmd | bldr/dep/env.py | 300750fbccc2987efd23f69b7b2d76d8563e2995 | # This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml
def default(dotbldr_path: str) -> dict:
dep = {
'config': toml.load(f"{dotbldr_path}/dependency.toml"),
'lock': toml.load(f"{dotbldr_path}/dependency.lock.toml")
}
return dep
def save_lock(dotbldr_path: str, lock_env: dict):
with open(f"{dotbldr_path}/dependency.lock.toml", 'w') as toml_file:
return toml.dump(lock_env, toml_file)
def save_config(dotbldr_path: str, config_env: dict):
with open(f"{dotbldr_path}/dependency.toml", 'w') as toml_file:
return toml.dump(config_env, toml_file) | [((7, 18, 7, 62), 'toml.load', 'toml.load', ({(7, 28, 7, 61): 'f"""{dotbldr_path}/dependency.toml"""'}, {}), "(f'{dotbldr_path}/dependency.toml')", False, 'import toml\n'), ((8, 16, 8, 65), 'toml.load', 'toml.load', ({(8, 26, 8, 64): 'f"""{dotbldr_path}/dependency.lock.toml"""'}, {}), "(f'{dotbldr_path}/dependency.lock.toml')", False, 'import toml\n'), ((14, 15, 14, 45), 'toml.dump', 'toml.dump', ({(14, 25, 14, 33): 'lock_env', (14, 35, 14, 44): 'toml_file'}, {}), '(lock_env, toml_file)', False, 'import toml\n'), ((18, 15, 18, 47), 'toml.dump', 'toml.dump', ({(18, 25, 18, 35): 'config_env', (18, 37, 18, 46): 'toml_file'}, {}), '(config_env, toml_file)', False, 'import toml\n')] |
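A hedged usage sketch of the helpers above; the .bldr path and the lock entry are made up, and the two toml files are assumed to already exist in that directory.

dotbldr_path = "/tmp/example-project/.bldr"
dep = default(dotbldr_path)                  # {'config': {...}, 'lock': {...}}
dep["lock"]["example-package"] = {"version": "1.2.3"}
save_lock(dotbldr_path, dep["lock"])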
ijufumi/demo-python | rasa-sample/actions.py | b48bdebde172ca581a48346a77b12c30ff202e73 | import re
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
import lark_module
class ActionHelloWorld(Action):
state_map = {}
def name(self) -> Text:
return "action_hello_world"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
sender_id = state["sender_id"]
if sender_id not in self.state_map:
self.state_map[sender_id] = 0
self.state_map[sender_id] += 1
dispatcher.utter_message(
text="Hello World!",
json_message={"data": "hogeohge"},
# template="<div></div>",
buttons=[{"title": "OK", "payload": "99!"}])
print("state: {}".format(self.state_map[sender_id]))
return []
class ActionCustomButton(Action):
def name(self) -> Text:
return "action_custom_button"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
buttons=[{"title": "OK", "payload": "1"},
{"title": "NG", "payload": "2"},
{"title": "Unknown", "payload": "9"}])
return []
class ActionJsonMessage(Action):
def name(self) -> Text:
return "action_json_message"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(
text="Which ?",
json_message={"data": {
"key1": "value1",
"key2": "value2",
}}
)
return []
class ActionConversation(Action):
def name(self) -> Text:
return "action_conversation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
print("current_state: {}".format(state))
input_text = state['latest_message'].get('text')
latest_bot = None
for event in reversed(state['events']):
if event['event'] == 'bot':
data = event.get('data', {}).get('custom', {}).get('data', [])
latest_bot = data[0] if len(data) > 0 else None
break
print("latest_bot: {}".format(latest_bot))
if not latest_bot:
print("use utter_conversation_1")
dispatcher.utter_message(template="utter_conversation_1", json_message={"data": {"key1": "value1",
"key2": "value2"}})
else:
if latest_bot == 'conversation_1':
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_2':
result = re.match("\\d+", input_text)
if result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
print("use utter_conversation_2")
dispatcher.utter_message(template="utter_conversation_2", json_message={"data": ["conversation_2"]})
elif latest_bot == 'conversation_3':
result = re.match("\\d+", input_text)
if not result:
print("use utter_conversation_3")
dispatcher.utter_message(template="utter_conversation_3", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["conversation_3"]})
return []
class ActionConversation2(Action):
action_state = {}
def name(self) -> Text:
return "action_conversation2"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
state = tracker.current_state()
sender_id = state.get("sender_id")
current_action = self.action_state.get(sender_id)
input_text = state['latest_message'].get('text')
print("state: {}, current_action: {}".format(state, current_action))
if current_action:
result = lark_module.execute(input_text)
if result:
dispatcher.utter_message(text=result, json_message={"data": ["step2"]},
elements=[{"data": ["step2"]}])
else:
dispatcher.utter_message(text="Bye", json_message={"data": ["step3"]})
else:
dispatcher.utter_message(text="Where are you from ?", json_message={"data": ["step3"]})
self.action_state[sender_id] = "get_start"
return []
| [((140, 21, 140, 52), 'lark_module.execute', 'lark_module.execute', ({(140, 41, 140, 51): 'input_text'}, {}), '(input_text)', False, 'import lark_module\n'), ((104, 25, 104, 53), 're.match', 're.match', ({(104, 34, 104, 40): '"""\\\\d+"""', (104, 42, 104, 52): 'input_text'}, {}), "('\\\\d+', input_text)", False, 'import re\n'), ((112, 25, 112, 53), 're.match', 're.match', ({(112, 34, 112, 40): '"""\\\\d+"""', (112, 42, 112, 52): 'input_text'}, {}), "('\\\\d+', input_text)", False, 'import re\n')] |
plympton/newsdiffs | parsers/politico.py | 2a055850bda850b9b6c28c989512d4e4b3e9b64e | from baseparser import BaseParser, grab_url, logger
# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.
# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4
class PoliticoParser(BaseParser):
domains = ['www.politico.com']
feeder_pat = '^http://www.politico.com/(news/stories|story)/'
feeder_pages = ['http://www.politico.com/']
feeder_bs = bs4.BeautifulSoup
def _parse(self, html):
soup = bs4.BeautifulSoup(html)
print_link = soup.findAll('a', text='Print')[0].get('href')
html2 = grab_url(print_link)
logger.debug('got html 2')
# Now we have to switch back to bs3. Hilarious.
# and the labeled encoding is wrong, so force utf-8.
soup = BeautifulSoup(html2, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
self.meta = soup.findAll('meta')
p_tags = soup.findAll('p')[1:]
real_p_tags = [p for p in p_tags if
not p.findAll(attrs={'class':"twitter-follow-button"})]
self.title = soup.find('strong').getText()
entity = soup.find('span', attrs={'class':'author'})
children = list(entity.childGenerator())
try:
self.byline = 'By ' + children[1].getText()
except IndexError:
self.byline = ''
self.date = children[-1].strip()
self.body = '\n'+'\n\n'.join([p.getText() for p in real_p_tags])
| [((20, 15, 20, 38), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', ({(20, 33, 20, 37): 'html'}, {}), '(html)', False, 'import bs4\n'), ((22, 16, 22, 36), 'baseparser.grab_url', 'grab_url', ({(22, 25, 22, 35): 'print_link'}, {}), '(print_link)', False, 'from baseparser import BaseParser, grab_url, logger\n'), ((23, 8, 23, 34), 'baseparser.logger.debug', 'logger.debug', ({(23, 21, 23, 33): '"""got html 2"""'}, {}), "('got html 2')", False, 'from baseparser import BaseParser, grab_url, logger\n'), ((26, 15, 27, 50), 'BeautifulSoup.BeautifulSoup', 'BeautifulSoup', (), '', False, 'from BeautifulSoup import BeautifulSoup\n')] |
fairhopeweb/dolt | integration-tests/bats/server_multiclient_test.py | 276b85b7b1287f883640ef3fcacb0bdb112749b2 | import os
import sys
from queue import Queue
from threading import Thread
from helper.pytest import DoltConnection
# Utility functions
def print_err(e):
print(e, file=sys.stderr)
def query(dc, query_str):
return dc.query(query_str, False)
def query_with_expected_error(dc, non_error_msg, query_str):
try:
dc.query(query_str, False)
raise Exception(non_error_msg)
except:
pass
def row(pk, c1, c2):
return {"pk":str(pk),"c1":str(c1),"c2":str(c2)}
UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"
def commit_and_update_branch(dc, commit_message, expected_hashes, branch_name):
expected_hash = "("
for i, eh in enumerate(expected_hashes):
if i != 0:
expected_hash += " or "
expected_hash += "hash = %s" % eh
expected_hash += ")"
query_str = 'UPDATE dolt_branches SET hash = Commit("-m", "%s") WHERE name = "%s" AND %s' % (commit_message, branch_name, expected_hash)
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception(UPDATE_BRANCH_FAIL_MSG)
query(dc, 'SET @@repo1_head=HASHOF("%s");' % branch_name)
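# Illustrative note (added for clarity, not part of the original script): with the
# arguments used below, e.g. commit_and_update_branch(dc, "msg", ["@@repo1_head"], "master"),
# the generated statement is roughly:
#   UPDATE dolt_branches SET hash = Commit("-m", "msg")
#   WHERE name = "master" AND (hash = @@repo1_head)
# so the branch only advances if its hash still matches the expected value, which is
# what lets the concurrent-writer tests below fail cleanly instead of silently
# overwriting each other's commits.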
def query_and_test_results(dc, query_str, expected):
results, _ = query(dc, query_str)
if results != expected:
raise Exception("Unexpected results for query:\n\t%s\nExpected:\n\t%s\nActual:\n\t%s" % (query_str, str(), str(results)))
def resolve_theirs(dc):
query_str = "REPLACE INTO test (pk, c1, c2) SELECT their_pk, their_c1, their_c2 FROM dolt_conflicts_test WHERE their_pk IS NOT NULL;"
query(dc, query_str)
query_str = """DELETE FROM test WHERE pk in (
SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL
);"""
query(dc, query_str)
query(dc, "DELETE FROM dolt_conflicts_test")
def create_branch(dc, branch_name):
query_str = 'INSERT INTO dolt_branches (name, hash) VALUES ("%s", @@repo1_head);' % branch_name
_, row_count = query(dc, query_str)
if row_count != 1:
raise Exception("Failed to create branch")
# work functions
def connect(dc):
dc.connect()
def create_tables(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
query_and_test_results(dc, "SHOW TABLES;", [{"Table": "test"}])
def duplicate_table_create(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query_with_expected_error(dc, "Should have failed creating duplicate table", """
CREATE TABLE test (
pk INT NOT NULL,
c1 INT,
c2 INT,
PRIMARY KEY(pk));""")
def seed_master(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
_, row_count = query(dc, 'INSERT INTO test VALUES (0,0,0),(1,1,1),(2,2,2)')
if row_count != 3:
raise Exception("Failed to update rows")
commit_and_update_branch(dc, "Seeded initial data", ["@@repo1_head"], "master")
expected = [row(0,0,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
def modify_pk0_on_master_and_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=1 WHERE pk=0;")
commit_and_update_branch(dc, "set c1 to 1", ["@@repo1_head"], "master")
def modify_pk0_on_master_no_commit(dc):
query(dc, 'SET @@repo1_head=HASHOF("master");')
query(dc, "UPDATE test SET c1=2 WHERE pk=0")
def fail_to_commit(dc):
try:
commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
raise Exception("Failed to fail commit")
except Exception as e:
if str(e) != UPDATE_BRANCH_FAIL_MSG:
raise e
def commit_to_feature(dc):
create_branch(dc, "feature")
commit_and_update_branch(dc, "set c1 to 2", ["@@repo1_head"], "feature")
def merge_resolve_commit(dc):
query(dc, 'SET @@repo1_head=Merge("master");')
query_and_test_results(dc, "SELECT * from dolt_conflicts;", [{"table": "test", "num_conflicts": "1"}])
resolve_theirs(dc)
expected = [row(0,1,0), row(1,1,1), row(2,2,2)]
query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
commit_and_update_branch(dc, "resolved conflicts", ['HASHOF("HEAD^1")', 'HASHOF("HEAD^2")'], "master")
# test script
MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]
CONNECTIONS = [None]*MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)
WORK_QUEUE = Queue()
# work item run by workers
class WorkItem(object):
def __init__(self, dc, *work_funcs):
self.dc = dc
self.work_funcs = work_funcs
self.exception = None
# worker thread function
def worker():
while True:
try:
item = WORK_QUEUE.get()
for work_func in item.work_funcs:
work_func(item.dc)
WORK_QUEUE.task_done()
except Exception as e:
            item.exception = e
WORK_QUEUE.task_done()
# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
t = Thread(target=worker)
t.daemon = True
t.start()
# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connection's work is done in parallel;
# each of the work functions for a connection is executed in order.
work_item_stages = [
[WorkItem(CONNECTIONS[0], connect, create_tables)],
[WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
[WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
[WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]
# Loop through the work item stages executing each stage by sending the work items for the stage to the worker threads
# and then waiting for all of them to finish before moving on to the next one. Checks for an error after every stage.
for stage, work_items in enumerate(work_item_stages):
print("Running stage %d / %d" % (stage,len(work_item_stages)))
for work_item in work_items:
WORK_QUEUE.put(work_item)
WORK_QUEUE.join()
for work_item in work_items:
if work_item.exception is not None:
print_err(work_item.exception)
sys.exit(1)
| [((146, 13, 146, 20), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((172, 9, 172, 30), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((198, 12, 198, 23), 'sys.exit', 'sys.exit', ({(198, 21, 198, 22): '(1)'}, {}), '(1)', False, 'import sys\n')] |
marik348/python-messenger | messenger/client/messenger.py | 6c1916b0df439cd997cb6e9376221fe587c3f1c1 | from requests import get, post, exceptions
from datetime import datetime
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QFont
from qtwidgets import PasswordEdit
from client_commands import (help_client, online, status, myself, reg, role, ban, unban)
from client_content import (get_warning_messages, get_client_commands, get_message_box_text, get_message_style)
from click_label import clickable
from client_ui import Ui_Messenger
from preferences import Preferences
from style_sheet import load_stylesheet
class Messenger(QtWidgets.QMainWindow, Ui_Messenger):
"""
    The messenger object acts as the main object and is managed by the client.
    It shows the UI and is responsible for the UX.
    The UI is split into 3 main parts, each with its own index: 0 - Login form, 1 - Registration form, 2 - Chat.
    Every 5 seconds it requests the server status.
    Every second it displays new messages, if the user is logged in.
    Under the main label "Python Messenger" there is a server status indicator, which shows whether the server
    is working; if it is, you can hover over it to see the full server status.
    In case of disconnection from the server, it shows a server-off message and navigates to the login form.
    The server IP address can be changed in the preferences menu.
    :param translate: Qt translate helper used to produce the strings shown in the UI
:param password_line1: input line with icons to show/hide password entries on login form
:param password_line2: input line with icons to show/hide password entries on registration form
:param username: user nickname string
:param password: user password string
:param last_message_time: last time of getting messages, defaults to 0
:param max_text_len: maximum text message length to send in chat, defaults to 250
:param server_IP: server IPv4 string
:param message_style: style for messages defined in :func:`get_message_style`
:param warning_messages: dict of warning messages defined in :func:`get_warning_messages`
:param message_box_text: dict of content for message box defined in :func:`get_message_box_text`
:param client_commands: list of dicts with client-side commands defined in :func:`get_client_commands`
:param run_client_command: dict, where key is the name of client command and value is the function of this command
:param server_commands: list of dicts with server-side commands defined in :func:`get_server_commands`
:param run_server_command: dict, where key is the name of server command and value is the function of this command
:param timer_get_messages: timer, which every second runs :func:`get_messages`
:param timer_get_status: timer, which every 5 seconds runs :func:`get_status`
"""
def __init__(self, parent=None):
"""Initialize messenger object."""
super().__init__(parent)
self.setupUi(self)
self.translate = QtCore.QCoreApplication.translate
self.password_line1 = PasswordEdit(True, self.login_page)
self.password_line2 = PasswordEdit(True, self.registration_page)
self.modify_password_lines()
# Connect buttons to the methods.
self.send_button.pressed.connect(self.send)
self.sign_up_button.pressed.connect(self.sign_up_user)
self.login_button.pressed.connect(self.login_user)
# Connect actions to the methods.
self.action_shortcuts.triggered.connect(self.show_shortcuts_box)
self.action_commands.triggered.connect(self.show_commands_box)
self.action_about.triggered.connect(self.show_about_box)
self.action_contacts.triggered.connect(self.show_contacts_box)
self.action_preferences.triggered.connect(self.open_preferences_window)
self.action_logout.triggered.connect(self.logout)
self.action_close.triggered.connect(self.close)
# Filter shortcuts and text overflow.
self.plain_text_edit.installEventFilter(self)
self.username = None
self.password = None
self.last_message_time = 0
self.max_text_len = 250
self.server_IP = '0.0.0.0:9000'
# Load client content.
self.message_style = get_message_style()
self.warning_messages = get_warning_messages()
self.message_box_text = get_message_box_text()
# Load commands.
self.client_commands = get_client_commands()
self.run_client_command = {'close': self.close,
'logout': self.logout,
'reload': self.reload}
self.server_commands = []
self.run_server_command = {}
self.timer_get_messages = QtCore.QTimer()
self.timer_get_messages.timeout.connect(self.get_messages)
self.timer_get_messages.start(1000)
self.timer_get_status = QtCore.QTimer()
self.timer_get_status.timeout.connect(self.get_status)
self.timer_get_status.start(5000)
clickable(self.go_to_sign_up).connect(self.go_to_registration_form)
clickable(self.go_to_login).connect(self.go_to_login_form)
self.get_status()
def eventFilter(self, obj, event):
"""
        Filters Enter key presses and message text length.
        If the Enter key is pressed, sends the user's message.
        If the message length exceeds the maximum, prevents further typing.
"""
if event.type() == QtCore.QEvent.KeyPress and obj is self.plain_text_edit:
text = self.plain_text_edit.toPlainText()
if event.key() == QtCore.Qt.Key_Return and self.plain_text_edit.hasFocus():
self.send()
return True
elif len(text) > self.max_text_len:
text = text[:self.max_text_len]
self.plain_text_edit.setPlainText(text)
cursor = self.plain_text_edit.textCursor()
cursor.setPosition(self.max_text_len)
self.plain_text_edit.setTextCursor(cursor)
return True
return super().eventFilter(obj, event)
def closeEvent(self, event):
"""
        Shows a question message box to accept or cancel closing the messenger.
        Asks the user whether they really want to close the messenger; if yes,
        marks the user's logout and closes the messenger.
        Otherwise, ignores the close event.
:param event: event to close the messenger
"""
reply = QMessageBox.question(self, 'Quit', self.message_box_text["close"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
# User closes the messenger and is logged in.
if reply == QMessageBox.Yes and self.stacked_widget.currentIndex() == 2:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
raise SystemExit
event.accept()
# User closes the messenger and is logged out.
elif reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def logout(self):
"""
        Shows a question message box to accept or cancel logging out of the account.
        Asks the user whether they really want to log out; if yes,
        marks the logout and navigates to the login form.
        Otherwise, ignores the logout event.
"""
reply = QMessageBox.question(self, 'Logout', self.message_box_text["logout"],
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
try:
post(
f'http://{self.server_IP}/logout',
json={"username": self.username}, verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_user_data()
return
self.go_to_login_form()
self.clear_user_data()
self.action_logout.setEnabled(False)
self.action_commands.setEnabled(False)
self.action_preferences.setEnabled(True)
else:
return
def modify_password_lines(self):
"""Modifies and appears password lines."""
geometry = QtCore.QRect(60, 200, 291, 41)
font = QFont()
font.setPointSize(14)
self.password_line1.setGeometry(geometry)
self.password_line1.setFont(font)
self.password_line1.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line1.setObjectName("password_line1")
self.password_line1.setPlaceholderText(self.translate("Messenger", "Password"))
self.password_line2.setGeometry(geometry)
self.password_line2.setFont(font)
self.password_line2.setEchoMode(QtWidgets.QLineEdit.Password)
self.password_line2.setObjectName("password_line2")
self.password_line2.setPlaceholderText(self.translate("Messenger", "Enter Your Password"))
def open_preferences_window(self):
"""Opens settings window."""
settings = Preferences(self)
if settings.exec():
self.server_IP = settings.server_IP.text()
def clear_user_data(self):
"""Clears user data after logout."""
self.username = None
self.plain_text_edit.clear()
self.text_browser.clear()
self.last_message_time = 0
def reload(self):
"""Reloads all messages and deletes commands output."""
self.text_browser.clear()
self.last_message_time = 0
def go_to_registration_form(self):
"""Navigates to registration menu."""
self.stacked_widget.setCurrentIndex(1)
def go_to_login_form(self):
"""Navigates to login menu."""
self.stacked_widget.setCurrentIndex(0)
def go_to_chat(self):
"""Navigates to chat."""
self.get_server_commands()
self.stacked_widget.setCurrentIndex(2)
self.action_logout.setEnabled(True)
self.action_commands.setEnabled(True)
self.action_preferences.setEnabled(False)
self.plain_text_edit.setFocus()
self.clear_credentials()
def clear_credentials(self):
"""Clears login and password lines after log in or sign up."""
self.password_line1.clear()
self.login_line1.clear()
self.password_line2.clear()
self.login_line2.clear()
self.password = None
def show_about_box(self):
"""Shows message box with content about messenger."""
QMessageBox.information(self, 'About', self.message_box_text["about"])
def show_contacts_box(self):
"""Shows message box with contacts information."""
QMessageBox.information(self, 'Contacts', self.message_box_text["contacts"])
def show_server_off_box(self):
"""Shows message box about server off information."""
QMessageBox.critical(self, 'Opsss...', self.message_box_text["server_is_off"])
self.go_to_login_form()
def show_shortcuts_box(self):
"""Shows message box with shortcuts."""
QMessageBox.information(self, 'Shortcuts', self.message_box_text["shortcuts"])
def show_commands_box(self):
"""Shows message box with available commands."""
output = help_client(self.client_commands, self.server_commands, [])
output = output.replace('=', '')
QMessageBox.information(self, 'Commands', output)
def sign_up_user(self):
"""
Registers user.
Verifies correctness of login and password input.
Sends request to sign up user.
"""
# Clear registration form.
self.login_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line2.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line2.setStyleSheet("border: 1px solid #B8B5B2")
self.username = self.login_line2.text()
self.password = self.password_line2.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
self.password_line2.setStyleSheet("border: 1px solid red")
return
else:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line2.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line2.setStyleSheet("border: 1px solid red")
return
if not self.username.isalnum():
self.login_error2.setText(self.translate("Messenger", self.warning_messages['not_alphanumeric']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/sign_up',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if response.json()['login_out_of_range']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['login_out_of_range']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
elif response.json()['password_out_of_range']:
self.password_error2.setText(self.translate("Messenger", self.warning_messages['password_out_of_range']))
self.password_error2.adjustSize()
self.password_line2.setStyleSheet("border: 1px solid red")
return
elif not response.json()['ok']:
self.login_error2.setText(self.translate("Messenger", self.warning_messages['registered']))
self.login_error2.adjustSize()
self.login_line2.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def login_user(self):
"""
Allows user to log in.
Verifies correctness of login and password input.
Sends request to authenticate user.
"""
# Clear login form.
self.login_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['empty_str']))
self.login_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.password_line1.setStyleSheet("border: 1px solid #B8B5B2")
self.username = self.login_line1.text()
self.password = self.password_line1.text()
# Check that form isn't empty.
if not self.username:
if not self.password:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
self.password_line1.setStyleSheet("border: 1px solid red")
return
else:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['login_required']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
else:
if not self.password:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['password_required']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
try:
response = post(
f'http://{self.server_IP}/auth',
auth=(self.username, self.password),
verify=False
)
except exceptions.RequestException as e:
self.show_server_off_box()
self.clear_credentials()
return
# Process bad request.
if not response.json()['exist']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['invalid_login']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
if not response.json()['match']:
self.password_error1.setText(self.translate("Messenger", self.warning_messages['invalid_password']))
self.password_line1.setStyleSheet("border: 1px solid red")
return
if response.json()['banned']:
self.login_error1.setText(self.translate("Messenger", self.warning_messages['banned']))
self.login_line1.setStyleSheet("border: 1px solid red")
return
self.go_to_chat()
def get_server_commands(self):
"""Sends request to get available server-side commands for user."""
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": 'help'}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text(response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
self.server_commands = response.json()['output']
# Connect command name with function.
for cmd in self.server_commands:
if cmd['name'] != 'help': self.run_server_command[f"{cmd['name']}"] = globals()[cmd['name']]
def send(self):
"""Separates and directs messages & commands to relevant function."""
self.plain_text_edit.setFocus()
text = self.plain_text_edit.toPlainText()
text = text.strip()
        # Sanitize the text so it can't inject HTML.
text = text.replace('</', '')
text = text.replace('<', '')
text = text.replace('>', '')
if len(text) > self.max_text_len:
text = text[:self.max_text_len]
if not text:
return
elif text.startswith('/'):
self.send_command(text[1:])
else:
self.send_message(text)
def send_message(self, text):
"""
Stores message on the server.
:param text: text of message
"""
try:
post(
f'http://{self.server_IP}/send_message',
json={"username": self.username, "text": text},
verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def send_command(self, cmd_string):
"""
        Executes a command.
        If it's a client-side command, executes it directly on the client.
        If it's a server-side command, sends it to the server for execution
        and processes the output.
:param cmd_string: command with parameters to execute
"""
command = cmd_string.split()[0]
args = cmd_string.split()[1:] if len(cmd_string) > 1 else None
# Run client-side command.
if command in [cmd['name'] for cmd in self.client_commands]:
self.run_client_command.get(command)()
self.plain_text_edit.clear()
return
# Invalid command name.
elif command not in [cmd['name'] for cmd in self.server_commands]:
self.show_text(f"<b>Error:</b> Command '/{command}' not found.<br>"
f"Try '/help' to list all available commands :)<br>")
self.plain_text_edit.clear()
return
# Process 'help' command.
elif command == 'help':
output = help_client(self.client_commands, self.server_commands, args)
self.show_text(output)
self.plain_text_edit.clear()
return
try:
response = post(
f'http://{self.server_IP}/command',
json={"username": self.username, "command": cmd_string}, verify=False
)
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
if not response.json()['ok']:
self.show_text("<b>Error:</b> " + response.json()['output'] + "<br>")
self.plain_text_edit.clear()
return
# Assign command function & run it with output from server.
run_command = self.run_server_command.get(command)
output = run_command(response.json()['output'], args)
self.show_text(output)
self.plain_text_edit.clear()
self.plain_text_edit.repaint()
def get_messages(self):
"""Sends request to get new messages and appears them in style."""
if not self.stacked_widget.currentIndex() == 2:
return
try:
response = get(
f'http://{self.server_IP}/get_messages',
params={'after': self.last_message_time},
verify=False
)
data = response.json()
except exceptions.RequestException as e:
self.clear_user_data()
self.show_server_off_box()
return
# Generate message.
for message in data['messages']:
# float -> datetime.
beauty_time = datetime.fromtimestamp(message['time'])
beauty_time = beauty_time.strftime('%d/%m %H:%M:%S')
            # The user sees their own messages on the right side.
if message['username'] == self.username:
self.show_text(self.message_style['begin'] + beauty_time + ' ' + message['username']
+ self.message_style['middle'] + message['text'] + self.message_style['end'])
self.last_message_time = message['time']
else:
self.show_text(message['username'] + ' ' + beauty_time)
self.show_text(message['text'] + "<br>")
self.last_message_time = message['time']
def get_status(self):
"""Sends request to get server status."""
try:
response = get(
f'http://{self.server_IP}/status',
verify=False
)
status = response.json()
# Server is off.
except exceptions.RequestException as e:
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-off.png"> Offline</p>'))
tool_tip = f"Can't connect to the server<br>" \
f"Maybe server isn't run or you've entered an invalid IP address in Preferences"
self.server_status.setToolTip(tool_tip)
return
# Server is on.
self.server_status.setText(self.translate("Messenger", '<p style="font-size:12px">'
'<img src="images/server-is-on.png"> Online</p>'))
tool_tip = f"Server is working<br>" \
f"Users online: {status['users_online']}<br>" \
f"Date and time: {status['time']}<br>" \
f"Registered users: {status['users_count']}<br>" \
f"Written messages: {status['messages_count']}"
self.server_status.setToolTip(tool_tip)
def show_text(self, text):
"""Shows given text in messenger chat."""
self.text_browser.append(text)
self.text_browser.repaint()
app = QtWidgets.QApplication([])
window = Messenger()
app.setStyleSheet(load_stylesheet())
window.show()
app.exec_()
| [((626, 6, 626, 32), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', ({(626, 29, 626, 31): '[]'}, {}), '([])', False, 'from PyQt5 import QtWidgets, QtCore\n'), ((628, 18, 628, 35), 'style_sheet.load_stylesheet', 'load_stylesheet', ({}, {}), '()', False, 'from style_sheet import load_stylesheet\n'), ((56, 30, 56, 65), 'qtwidgets.PasswordEdit', 'PasswordEdit', ({(56, 43, 56, 47): 'True', (56, 49, 56, 64): 'self.login_page'}, {}), '(True, self.login_page)', False, 'from qtwidgets import PasswordEdit\n'), ((57, 30, 57, 72), 'qtwidgets.PasswordEdit', 'PasswordEdit', ({(57, 43, 57, 47): 'True', (57, 49, 57, 71): 'self.registration_page'}, {}), '(True, self.registration_page)', False, 'from qtwidgets import PasswordEdit\n'), ((84, 29, 84, 48), 'client_content.get_message_style', 'get_message_style', ({}, {}), '()', False, 'from client_content import get_warning_messages, get_client_commands, get_message_box_text, get_message_style\n'), ((85, 32, 85, 54), 'client_content.get_warning_messages', 'get_warning_messages', ({}, {}), '()', False, 'from client_content import get_warning_messages, get_client_commands, get_message_box_text, get_message_style\n'), ((86, 32, 86, 54), 'client_content.get_message_box_text', 'get_message_box_text', ({}, {}), '()', False, 'from client_content import get_warning_messages, get_client_commands, get_message_box_text, get_message_style\n'), ((89, 31, 89, 52), 'client_content.get_client_commands', 'get_client_commands', ({}, {}), '()', False, 'from client_content import get_warning_messages, get_client_commands, get_message_box_text, get_message_style\n'), ((96, 34, 96, 49), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ({}, {}), '()', False, 'from PyQt5 import QtWidgets, QtCore\n'), ((100, 32, 100, 47), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ({}, {}), '()', False, 'from PyQt5 import QtWidgets, QtCore\n'), ((146, 16, 147, 87), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', ({(146, 37, 146, 41): 'self', (146, 43, 146, 49): '"""Quit"""', (146, 51, 146, 81): "self.message_box_text['close']", (147, 37, 147, 69): 'QMessageBox.Yes | QMessageBox.No', (147, 71, 147, 86): 'QMessageBox.Yes'}, {}), "(self, 'Quit', self.message_box_text['close'], \n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((177, 16, 178, 87), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', ({(177, 37, 177, 41): 'self', (177, 43, 177, 51): '"""Logout"""', (177, 53, 177, 84): "self.message_box_text['logout']", (178, 37, 178, 69): 'QMessageBox.Yes | QMessageBox.No', (178, 71, 178, 86): 'QMessageBox.Yes'}, {}), "(self, 'Logout', self.message_box_text['logout'], \n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((202, 19, 202, 49), 'PyQt5.QtCore.QRect', 'QtCore.QRect', ({(202, 32, 202, 34): '60', (202, 36, 202, 39): '200', (202, 41, 202, 44): '291', (202, 46, 202, 48): '41'}, {}), '(60, 200, 291, 41)', False, 'from PyQt5 import QtWidgets, QtCore\n'), ((203, 15, 203, 22), 'PyQt5.QtGui.QFont', 'QFont', ({}, {}), '()', False, 'from PyQt5.QtGui import QFont\n'), ((221, 19, 221, 36), 'preferences.Preferences', 'Preferences', ({(221, 31, 221, 35): 'self'}, {}), '(self)', False, 'from preferences import Preferences\n'), ((272, 8, 272, 78), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(272, 32, 272, 36): 'self', (272, 38, 272, 45): '"""About"""', (272, 47, 272, 77): "self.message_box_text['about']"}, {}), "(self, 'About', 
self.message_box_text['about'])", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((277, 8, 277, 84), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(277, 32, 277, 36): 'self', (277, 38, 277, 48): '"""Contacts"""', (277, 50, 277, 83): "self.message_box_text['contacts']"}, {}), "(self, 'Contacts', self.message_box_text['contacts'])", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((282, 8, 282, 86), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', ({(282, 29, 282, 33): 'self', (282, 35, 282, 45): '"""Opsss..."""', (282, 47, 282, 85): "self.message_box_text['server_is_off']"}, {}), "(self, 'Opsss...', self.message_box_text['server_is_off'])", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((288, 8, 288, 86), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(288, 32, 288, 36): 'self', (288, 38, 288, 49): '"""Shortcuts"""', (288, 51, 288, 85): "self.message_box_text['shortcuts']"}, {}), "(self, 'Shortcuts', self.message_box_text['shortcuts'])", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((293, 17, 293, 76), 'client_commands.help_client', 'help_client', ({(293, 29, 293, 49): 'self.client_commands', (293, 51, 293, 71): 'self.server_commands', (293, 73, 293, 75): '[]'}, {}), '(self.client_commands, self.server_commands, [])', False, 'from client_commands import help_client, online, status, myself, reg, role, ban, unban\n'), ((295, 8, 295, 57), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(295, 32, 295, 36): 'self', (295, 38, 295, 48): '"""Commands"""', (295, 50, 295, 56): 'output'}, {}), "(self, 'Commands', output)", False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((339, 23, 343, 13), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((404, 23, 408, 13), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((434, 23, 437, 13), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((485, 12, 489, 13), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((533, 23, 536, 13), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((562, 23, 566, 13), 'requests.get', 'get', (), '', False, 'from requests import get, post, exceptions\n'), ((576, 26, 576, 65), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', ({(576, 49, 576, 64): "message['time']"}, {}), "(message['time'])", False, 'from datetime import datetime\n'), ((594, 23, 597, 13), 'requests.get', 'get', (), '', False, 'from requests import get, post, exceptions\n'), ((104, 8, 104, 37), 'click_label.clickable', 'clickable', ({(104, 18, 104, 36): 'self.go_to_sign_up'}, {}), '(self.go_to_sign_up)', False, 'from click_label import clickable\n'), ((105, 8, 105, 35), 'click_label.clickable', 'clickable', ({(105, 18, 105, 34): 'self.go_to_login'}, {}), '(self.go_to_login)', False, 'from click_label import clickable\n'), ((152, 16, 155, 17), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((182, 16, 185, 17), 'requests.post', 'post', (), '', False, 'from requests import get, post, exceptions\n'), ((527, 21, 527, 82), 'client_commands.help_client', 'help_client', ({(527, 33, 527, 53): 'self.client_commands', (527, 55, 527, 75): 'self.server_commands', (527, 77, 527, 81): 'args'}, {}), '(self.client_commands, self.server_commands, args)', False, 'from client_commands import help_client, online, status, 
myself, reg, role, ban, unban\n')] |
sainjusajan/django-oscar | oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | from __future__ import unicode_literals
import inspect
import os
import signal
import sys
import threading
import weakref
from wcwidth import wcwidth
from six.moves import range
__all__ = (
'Event',
'DummyContext',
'get_cwidth',
'suspend_to_background_supported',
'is_conemu_ansi',
'is_windows',
'in_main_thread',
'take_using_weights',
'test_callable_args',
)
class Event(object):
"""
Simple event to which event handlers can be attached. For instance::
class Cls:
def __init__(self):
# Define event. The first parameter is the sender.
self.event = Event(self)
obj = Cls()
def handler(sender):
pass
# Add event handler by using the += operator.
obj.event += handler
# Fire event.
obj.event()
"""
def __init__(self, sender, handler=None):
self.sender = sender
self._handlers = []
if handler is not None:
self += handler
def __call__(self):
" Fire event. "
for handler in self._handlers:
handler(self.sender)
def fire(self):
" Alias for just calling the event. "
self()
def __iadd__(self, handler):
"""
Add another handler to this callback.
(Handler should be a callable that takes exactly one parameter: the
sender object.)
"""
# Test handler.
assert callable(handler)
if not test_callable_args(handler, [None]):
raise TypeError("%r doesn't take exactly one argument." % handler)
# Add to list of event handlers.
self._handlers.append(handler)
return self
def __isub__(self, handler):
"""
Remove a handler from this callback.
"""
self._handlers.remove(handler)
return self
# Cache of signatures. Improves the performance of `test_callable_args`.
_signatures_cache = weakref.WeakKeyDictionary()
def test_callable_args(func, args):
"""
Return True when this function can be called with the given arguments.
"""
assert isinstance(args, (list, tuple))
signature = getattr(inspect, 'signature', None)
if signature is not None:
# For Python 3, use inspect.signature.
try:
sig = _signatures_cache[func]
except KeyError:
sig = signature(func)
_signatures_cache[func] = sig
try:
sig.bind(*args)
except TypeError:
return False
else:
return True
else:
# For older Python versions, fall back to using getargspec.
spec = inspect.getargspec(func)
# Drop the 'self'
def drop_self(spec):
args, varargs, varkw, defaults = spec
if args[0:1] == ['self']:
args = args[1:]
return inspect.ArgSpec(args, varargs, varkw, defaults)
spec = drop_self(spec)
# When taking *args, always return True.
if spec.varargs is not None:
return True
# Test whether the given amount of args is between the min and max
# accepted argument counts.
return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
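# Illustrative behaviour (example added for clarity, not part of the original module):
# a callable taking exactly one positional argument passes, a two-argument one does not.
#   test_callable_args(lambda sender: None, [None])    # -> True
#   test_callable_args(lambda a, b: None, [None])      # -> False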
class DummyContext(object):
"""
(contextlib.nested is not available on Py3)
"""
def __enter__(self):
pass
def __exit__(self, *a):
pass
class _CharSizesCache(dict):
"""
Cache for wcwidth sizes.
"""
def __missing__(self, string):
# Note: We use the `max(0, ...` because some non printable control
# characters, like e.g. Ctrl-underscore get a -1 wcwidth value.
# It can be possible that these characters end up in the input
# text.
if len(string) == 1:
result = max(0, wcwidth(string))
else:
result = sum(max(0, wcwidth(c)) for c in string)
# Cache for short strings.
# (It's hard to tell what we can consider short...)
if len(string) < 256:
self[string] = result
return result
_CHAR_SIZES_CACHE = _CharSizesCache()
def get_cwidth(string):
"""
Return width of a string. Wrapper around ``wcwidth``.
"""
return _CHAR_SIZES_CACHE[string]
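# Quick illustration (assumed example, not from the original module): most ASCII
# characters have a display width of 1, while full-width CJK characters report 2.
#   get_cwidth('a')    # -> 1
#   get_cwidth(u'漢')   # -> 2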
def suspend_to_background_supported():
"""
Returns `True` when the Python implementation supports
    suspend-to-background. This is typically `False` on Windows systems.
"""
return hasattr(signal, 'SIGTSTP')
def is_windows():
"""
True when we are using Windows.
"""
return sys.platform.startswith('win') # E.g. 'win32', not 'darwin' or 'linux2'
def is_conemu_ansi():
"""
True when the ConEmu Windows console is used.
"""
return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'
def in_main_thread():
"""
True when the current thread is the main thread.
"""
return threading.current_thread().__class__.__name__ == '_MainThread'
def take_using_weights(items, weights):
"""
Generator that keeps yielding items from the items list, in proportion to
their weight. For instance::
# Getting the first 70 items from this generator should have yielded 10
# times A, 20 times B and 40 times C, all distributed equally..
take_using_weights(['A', 'B', 'C'], [5, 10, 20])
:param items: List of items to take from.
:param weights: Integers representing the weight. (Numbers have to be
integers, not floats.)
"""
assert isinstance(items, list)
assert isinstance(weights, list)
assert all(isinstance(i, int) for i in weights)
assert len(items) == len(weights)
assert len(items) > 0
already_taken = [0 for i in items]
item_count = len(items)
max_weight = max(weights)
i = 0
while True:
        # Each iteration of this loop, we fill up by (total_weight/max_weight).
adding = True
while adding:
adding = False
for item_i, item, weight in zip(range(item_count), items, weights):
if already_taken[item_i] < i * weight / float(max_weight):
yield item
already_taken[item_i] += 1
adding = True
i += 1
| [((86, 20, 86, 47), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ({}, {}), '()', False, 'import weakref\n'), ((187, 11, 187, 41), 'sys.platform.startswith', 'sys.platform.startswith', ({(187, 35, 187, 40): '"""win"""'}, {}), "('win')", False, 'import sys\n'), ((112, 15, 112, 39), 'inspect.getargspec', 'inspect.getargspec', ({(112, 34, 112, 38): 'func'}, {}), '(func)', False, 'import inspect\n'), ((119, 19, 119, 66), 'inspect.ArgSpec', 'inspect.ArgSpec', ({(119, 35, 119, 39): 'args', (119, 41, 119, 48): 'varargs', (119, 50, 119, 55): 'varkw', (119, 57, 119, 65): 'defaults'}, {}), '(args, varargs, varkw, defaults)', False, 'import inspect\n'), ((194, 28, 194, 63), 'os.environ.get', 'os.environ.get', ({(194, 43, 194, 55): '"""ConEmuANSI"""', (194, 57, 194, 62): '"""OFF"""'}, {}), "('ConEmuANSI', 'OFF')", False, 'import os\n'), ((153, 28, 153, 43), 'wcwidth.wcwidth', 'wcwidth', ({(153, 36, 153, 42): 'string'}, {}), '(string)', False, 'from wcwidth import wcwidth\n'), ((201, 11, 201, 37), 'threading.current_thread', 'threading.current_thread', ({}, {}), '()', False, 'import threading\n'), ((234, 44, 234, 61), 'six.moves.range', 'range', ({(234, 50, 234, 60): 'item_count'}, {}), '(item_count)', False, 'from six.moves import range\n'), ((155, 32, 155, 42), 'wcwidth.wcwidth', 'wcwidth', ({(155, 40, 155, 41): 'c'}, {}), '(c)', False, 'from wcwidth import wcwidth\n')] |
hirnimeshrampuresoftware/py-lmdb | lmdb/cffi.py | 9aa7560f8e1a89b437fb3fed7ea36f5888b7a963 | #
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#
"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.
Please see https://lmdb.readthedocs.io/
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import inspect
import os
import sys
import threading
is_win32 = sys.platform == 'win32'
if is_win32:
import msvcrt
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # type: ignore
import lmdb
try:
from lmdb import _config
except ImportError:
_config = None # type: ignore
__all__ = [
'Cursor',
'Environment',
'Transaction',
'_Database',
'enable_drop_gil',
'version',
]
__all__ += [
'BadDbiError',
'BadRslotError',
'BadTxnError',
'BadValsizeError',
'CorruptedError',
'CursorFullError',
'DbsFullError',
'DiskError',
'Error',
'IncompatibleError',
'InvalidError',
'InvalidParameterError',
'KeyExistsError',
'LockError',
'MapFullError',
'MapResizedError',
'MemoryError',
'NotFoundError',
'PageFullError',
'PageNotFoundError',
'PanicError',
'ReadersFullError',
'ReadonlyError',
'TlsFullError',
'TxnFullError',
'VersionMismatchError',
]
# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, 'unicode', str)
BytesType = getattr(__builtin__, 'bytes', str)
O_0755 = int('0755', 8)
O_0111 = int('0111', 8)
EMPTY_BYTES = UnicodeType().encode()
# Used to track context across CFFI callbacks.
_callbacks = threading.local()
_CFFI_CDEF = '''
typedef int mode_t;
typedef ... MDB_env;
typedef struct MDB_txn MDB_txn;
typedef struct MDB_cursor MDB_cursor;
typedef unsigned int MDB_dbi;
enum MDB_cursor_op {
MDB_FIRST,
MDB_FIRST_DUP,
MDB_GET_BOTH,
MDB_GET_BOTH_RANGE,
MDB_GET_CURRENT,
MDB_GET_MULTIPLE,
MDB_LAST,
MDB_LAST_DUP,
MDB_NEXT,
MDB_NEXT_DUP,
MDB_NEXT_MULTIPLE,
MDB_NEXT_NODUP,
MDB_PREV,
MDB_PREV_DUP,
MDB_PREV_NODUP,
MDB_SET,
MDB_SET_KEY,
MDB_SET_RANGE,
...
};
typedef enum MDB_cursor_op MDB_cursor_op;
struct MDB_val {
size_t mv_size;
void *mv_data;
...;
};
typedef struct MDB_val MDB_val;
struct MDB_stat {
unsigned int ms_psize;
unsigned int ms_depth;
size_t ms_branch_pages;
size_t ms_leaf_pages;
size_t ms_overflow_pages;
size_t ms_entries;
...;
};
typedef struct MDB_stat MDB_stat;
struct MDB_envinfo {
void *me_mapaddr;
size_t me_mapsize;
size_t me_last_pgno;
size_t me_last_txnid;
unsigned int me_maxreaders;
unsigned int me_numreaders;
...;
};
typedef struct MDB_envinfo MDB_envinfo;
typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
void *relctx);
char *mdb_strerror(int err);
int mdb_env_create(MDB_env **env);
int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
mode_t mode);
int mdb_env_copy2(MDB_env *env, const char *path, int flags);
int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
int mdb_env_stat(MDB_env *env, MDB_stat *stat);
int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
int mdb_env_get_maxkeysize(MDB_env *env);
int mdb_env_sync(MDB_env *env, int force);
void mdb_env_close(MDB_env *env);
int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
int mdb_env_get_path(MDB_env *env, const char **path);
int mdb_env_set_mapsize(MDB_env *env, size_t size);
int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
MDB_txn **txn);
int mdb_txn_commit(MDB_txn *txn);
void mdb_txn_reset(MDB_txn *txn);
int mdb_txn_renew(MDB_txn *txn);
void mdb_txn_abort(MDB_txn *txn);
size_t mdb_txn_id(MDB_txn *txn);
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
MDB_dbi *dbi);
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
void mdb_cursor_close(MDB_cursor *cursor);
int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);
typedef int (MDB_msg_func)(const char *msg, void *ctx);
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
int mdb_reader_check(MDB_env *env, int *dead);
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
#define MDB_VERSION_MAJOR ...
#define MDB_VERSION_MINOR ...
#define MDB_VERSION_PATCH ...
#define EACCES ...
#define EAGAIN ...
#define EINVAL ...
#define ENOMEM ...
#define ENOSPC ...
#define MDB_BAD_RSLOT ...
#define MDB_BAD_DBI ...
#define MDB_BAD_TXN ...
#define MDB_BAD_VALSIZE ...
#define MDB_CORRUPTED ...
#define MDB_CURSOR_FULL ...
#define MDB_DBS_FULL ...
#define MDB_INCOMPATIBLE ...
#define MDB_INVALID ...
#define MDB_KEYEXIST ...
#define MDB_MAP_FULL ...
#define MDB_MAP_RESIZED ...
#define MDB_NOTFOUND ...
#define MDB_PAGE_FULL ...
#define MDB_PAGE_NOTFOUND ...
#define MDB_PANIC ...
#define MDB_READERS_FULL ...
#define MDB_TLS_FULL ...
#define MDB_TXN_FULL ...
#define MDB_VERSION_MISMATCH ...
#define MDB_APPEND ...
#define MDB_APPENDDUP ...
#define MDB_CP_COMPACT ...
#define MDB_CREATE ...
#define MDB_DUPFIXED ...
#define MDB_DUPSORT ...
#define MDB_INTEGERDUP ...
#define MDB_INTEGERKEY ...
#define MDB_MAPASYNC ...
#define MDB_NODUPDATA ...
#define MDB_NOLOCK ...
#define MDB_NOMEMINIT ...
#define MDB_NOMETASYNC ...
#define MDB_NOOVERWRITE ...
#define MDB_NORDAHEAD ...
#define MDB_NOSUBDIR ...
#define MDB_NOSYNC ...
#define MDB_NOTLS ...
#define MDB_RDONLY ...
#define MDB_REVERSEKEY ...
#define MDB_WRITEMAP ...
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen);
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen,
unsigned int flags);
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
MDB_val *val_out);
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op);
static int pymdb_cursor_put(MDB_cursor *cursor,
char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags);
// Prefaults a range
static void preload(int rc, void *x, size_t size);
'''
_CFFI_CDEF_PATCHED = '''
int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
'''
_CFFI_VERIFY = '''
#include <sys/stat.h>
#include "lmdb.h"
#include "preload.h"
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
MDB_val *val_out)
{
MDB_val key = {keylen, key_s};
int rc = mdb_get(txn, dbi, &key, val_out);
return rc;
}
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen, unsigned int flags)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
return mdb_put(txn, dbi, &key, &val, flags);
}
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
MDB_val *valptr;
if(vallen == 0) {
valptr = NULL;
} else {
valptr = &val;
}
return mdb_del(txn, dbi, &key, valptr);
}
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op)
{
MDB_val tmp_key = {key_len, key_s};
MDB_val tmp_data = {data_len, data_s};
int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
if(! rc) {
*key = tmp_key;
*data = tmp_data;
}
return rc;
}
static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags)
{
MDB_val tmpkey = {keylen, key_s};
MDB_val tmpval = {vallen, val_s};
return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
}
'''
if not lmdb._reading_docs():
import cffi
# Try to use distutils-bundled CFFI configuration to avoid a recompile and
# potential compile errors during first module import.
_config_vars = _config.CONFIG if _config else {
'extra_compile_args': ['-w'],
'extra_sources': ['lib/mdb.c', 'lib/midl.c'],
'extra_include_dirs': ['lib'],
'extra_library_dirs': [],
'libraries': []
}
_have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore
if _have_patched_lmdb:
_CFFI_CDEF += _CFFI_CDEF_PATCHED
_ffi = cffi.FFI()
_ffi.cdef(_CFFI_CDEF)
_lib = _ffi.verify(_CFFI_VERIFY,
modulename='lmdb_cffi',
ext_package='lmdb',
sources=_config_vars['extra_sources'],
extra_compile_args=_config_vars['extra_compile_args'],
include_dirs=_config_vars['extra_include_dirs'],
libraries=_config_vars['libraries'],
library_dirs=_config_vars['extra_library_dirs'])
@_ffi.callback("int(char *, void *)")
def _msg_func(s, _):
"""mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list.
"""
_callbacks.msg_func.append(_ffi.string(s).decode())
return 0
class Error(Exception):
"""Raised when an LMDB-related error occurs, and no more specific
:py:class:`lmdb.Error` subclass exists."""
def __init__(self, what, code=0):
self.what = what
self.code = code
self.reason = _ffi.string(_lib.mdb_strerror(code))
msg = what
if code:
msg = '%s: %s' % (what, self.reason)
hint = getattr(self, 'MDB_HINT', None)
if hint:
msg += ' (%s)' % (hint,)
Exception.__init__(self, msg)
class KeyExistsError(Error):
"""Key/data pair already exists."""
MDB_NAME = 'MDB_KEYEXIST'
class NotFoundError(Error):
"""No matching key/data pair found.
Normally py-lmdb indicates a missing key by returning ``None``, or a
user-supplied default value, however LMDB may return this error where
py-lmdb does not know to convert it into a non-exceptional return.
"""
MDB_NAME = 'MDB_NOTFOUND'
class PageNotFoundError(Error):
"""Request page not found."""
MDB_NAME = 'MDB_PAGE_NOTFOUND'
class CorruptedError(Error):
"""Located page was of the wrong type."""
MDB_NAME = 'MDB_CORRUPTED'
class PanicError(Error):
"""Update of meta page failed."""
MDB_NAME = 'MDB_PANIC'
class VersionMismatchError(Error):
"""Database environment version mismatch."""
MDB_NAME = 'MDB_VERSION_MISMATCH'
class InvalidError(Error):
"""File is not an MDB file."""
MDB_NAME = 'MDB_INVALID'
class MapFullError(Error):
"""Environment map_size= limit reached."""
MDB_NAME = 'MDB_MAP_FULL'
MDB_HINT = 'Please use a larger Environment(map_size=) parameter'
class DbsFullError(Error):
"""Environment max_dbs= limit reached."""
MDB_NAME = 'MDB_DBS_FULL'
MDB_HINT = 'Please use a larger Environment(max_dbs=) parameter'
class ReadersFullError(Error):
"""Environment max_readers= limit reached."""
MDB_NAME = 'MDB_READERS_FULL'
MDB_HINT = 'Please use a larger Environment(max_readers=) parameter'
class TlsFullError(Error):
"""Thread-local storage keys full - too many environments open."""
MDB_NAME = 'MDB_TLS_FULL'
class TxnFullError(Error):
"""Transaciton has too many dirty pages - transaction too big."""
MDB_NAME = 'MDB_TXN_FULL'
MDB_HINT = 'Please do less work within your transaction'
class CursorFullError(Error):
"""Internal error - cursor stack limit reached."""
MDB_NAME = 'MDB_CURSOR_FULL'
class PageFullError(Error):
"""Internal error - page has no more space."""
MDB_NAME = 'MDB_PAGE_FULL'
class MapResizedError(Error):
"""Database contents grew beyond environment map_size=."""
MDB_NAME = 'MDB_MAP_RESIZED'
class IncompatibleError(Error):
"""Operation and DB incompatible, or DB flags changed."""
MDB_NAME = 'MDB_INCOMPATIBLE'
class BadRslotError(Error):
"""Invalid reuse of reader locktable slot."""
MDB_NAME = 'MDB_BAD_RSLOT'
class BadDbiError(Error):
"""The specified DBI was changed unexpectedly."""
MDB_NAME = 'MDB_BAD_DBI'
class BadTxnError(Error):
"""Transaction cannot recover - it must be aborted."""
MDB_NAME = 'MDB_BAD_TXN'
class BadValsizeError(Error):
"""Too big key/data, key is empty, or wrong DUPFIXED size."""
MDB_NAME = 'MDB_BAD_VALSIZE'
class ReadonlyError(Error):
"""An attempt was made to modify a read-only database."""
MDB_NAME = 'EACCES'
class InvalidParameterError(Error):
"""An invalid parameter was specified."""
MDB_NAME = 'EINVAL'
class LockError(Error):
"""The environment was locked by another process."""
MDB_NAME = 'EAGAIN'
class MemoryError(Error):
"""Out of memory."""
MDB_NAME = 'ENOMEM'
class DiskError(Error):
"""No more disk space."""
MDB_NAME = 'ENOSPC'
# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not lmdb._reading_docs():
_error_map = {}
for obj in list(globals().values()):
if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
_error_map[getattr(_lib, obj.MDB_NAME)] = obj
del obj
def _error(what, rc):
"""Lookup and instantiate the correct exception class for the error code
`rc`, using :py:class:`Error` if no better class exists."""
return _error_map.get(rc, Error)(what, rc)
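# For instance (illustrative only): _error("mdb_get", _lib.MDB_NOTFOUND) returns a
# NotFoundError whose message combines "mdb_get" with the mdb_strerror() text for
# that code, while an unrecognised code falls back to the generic Error class.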
class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object):
"""We need this because CFFI on PyPy treats None as cffi.NULL, instead of
throwing an exception it feeds LMDB null pointers. That means simply
replacing native handles with None during _invalidate() will cause NULL
pointer dereferences. Instead use this class, and its weird name to cause a
TypeError, with a very obvious string in the exception text.
The only alternatives to this are inserting a check around every single use
of a native handle to ensure the handle is still valid prior to calling
LMDB, or doing no crash-safety checking at all.
"""
def __nonzero__(self):
return 0
def __bool__(self):
return False
def __repr__(self):
return "<This used to be a LMDB resource but it was deleted or closed>"
_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()
def _mvbuf(mv):
"""Convert a MDB_val cdata to a CFFI buffer object."""
return _ffi.buffer(mv.mv_data, mv.mv_size)
def _mvstr(mv):
"""Convert a MDB_val cdata to Python bytes."""
return _ffi.buffer(mv.mv_data, mv.mv_size)[:]
def preload(mv):
_lib.preload(0, mv.mv_data, mv.mv_size)
def enable_drop_gil():
"""Deprecated."""
def version(subpatch=False):
"""
Return a tuple of integers `(major, minor, patch)` describing the LMDB
library version that the binding is linked against. The version of the
binding itself is available from ``lmdb.__version__``.
`subpatch`:
        If true, returns a 4-integer tuple consisting of the same plus
an extra integer that represents any patches applied by py-lmdb
itself (0 representing no patches).
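    A minimal illustrative sketch:
    ::
        >>> major, minor, patch = lmdb.version()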
"""
if subpatch:
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH,
1 if _have_patched_lmdb else 0)
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH)
class Environment(object):
"""
Structure for a database environment. An environment may contain multiple
databases, all residing in the same shared-memory map and underlying disk
file.
To write to the environment a :py:class:`Transaction` must be created. One
simultaneous write transaction is allowed, however there is no limit on the
number of read transactions even when a write transaction exists.
This class is aliased to `lmdb.open`.
It is a serious error to have open the same LMDB file in the same process at
the same time. Failure to heed this may lead to data corruption and
interpreter crash.
Equivalent to `mdb_env_open()
<http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_
`path`:
Location of directory (if `subdir=True`) or file prefix to store
the database.
`map_size`:
Maximum size database may grow to; used to size the memory mapping.
If database grows larger than ``map_size``, an exception will be
raised and the user must close and reopen :py:class:`Environment`.
On 64-bit there is no penalty for making this huge (say 1TB). Must
be <2GB on 32-bit.
.. note::
**The default map size is set low to encourage a crash**, so
users can figure out a good value before learning about this
option too late.
`subdir`:
If ``True``, `path` refers to a subdirectory to store the data and
lock files in, otherwise it refers to a filename prefix.
`readonly`:
If ``True``, disallow any write operations. Note the lock file is
still modified. If specified, the ``write`` flag to
:py:meth:`begin` or :py:class:`Transaction` is ignored.
`metasync`:
If ``False``, flush system buffers to disk only once per
transaction, omit the metadata flush. Defer that until the system
flushes files to disk, or next commit or :py:meth:`sync`.
This optimization maintains database integrity, but a system crash
may undo the last committed transaction. I.e. it preserves the ACI
(atomicity, consistency, isolation) but not D (durability) database
property.
`sync`:
If ``False``, don't flush system buffers to disk when committing a
transaction. This optimization means a system crash can corrupt the
database or lose the last transactions if buffers are not yet
flushed to disk.
The risk is governed by how often the system flushes dirty buffers
to disk and how often :py:meth:`sync` is called. However, if the
filesystem preserves write order and `writemap=False`, transactions
exhibit ACI (atomicity, consistency, isolation) properties and only
lose D (durability). I.e. database integrity is maintained, but a
system crash may undo the final transactions.
Note that `sync=False, writemap=True` leaves the system with no
hint for when to write transactions to disk, unless :py:meth:`sync`
is called. `map_async=True, writemap=True` may be preferable.
`mode`:
File creation mode.
`create`:
If ``False``, do not create the directory `path` if it is missing.
`readahead`:
If ``False``, LMDB will disable the OS filesystem readahead
mechanism, which may improve random read performance when a
database is larger than RAM.
`writemap`:
If ``True``, use a writeable memory map unless `readonly=True`.
This is faster and uses fewer mallocs, but loses protection from
application bugs like wild pointer writes and other bad updates
into the database. Incompatible with nested transactions.
Processes with and without `writemap` on the same environment do
not cooperate well.
`meminit`:
If ``False`` LMDB will not zero-initialize buffers prior to writing
them to disk. This improves performance but may cause old heap data
        to be saved in the unused portion of the buffer. Do not use
this option if your application manipulates confidential data (e.g.
plaintext passwords) in memory. This option is only meaningful when
`writemap=False`; new pages are always zero-initialized when
`writemap=True`.
`map_async`:
When ``writemap=True``, use asynchronous flushes to disk. As with
``sync=False``, a system crash can then corrupt the database or
lose the last transactions. Calling :py:meth:`sync` ensures
on-disk database integrity until next commit.
`max_readers`:
Maximum number of simultaneous read transactions. Can only be set
by the first process to open an environment, as it affects the size
of the lock file and shared memory area. Attempts to simultaneously
start more than this many *read* transactions will fail.
`max_dbs`:
Maximum number of databases available. If 0, assume environment
will be used as a single database.
`max_spare_txns`:
Read-only transactions to cache after becoming unused. Caching
transactions avoids two allocations, one lock and linear scan
of the shared environment per invocation of :py:meth:`begin`,
:py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or
:py:meth:`cursor`. Should match the process's maximum expected
concurrent transactions (e.g. thread count).
`lock`:
If ``False``, don't do any locking. If concurrent access is
anticipated, the caller must manage all concurrency itself. For
proper operation the caller must enforce single-writer semantics,
and must ensure that no readers are using old transactions while a
writer is active. The simplest approach is to use an exclusive lock
so that no readers may be active at all when a writer begins.
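    An illustrative usage sketch (the path and ``map_size`` below are arbitrary
    assumptions, not defaults):
    ::
        >>> env = lmdb.open('/tmp/example-env', map_size=2 ** 24)
        >>> with env.begin(write=True) as txn:
        ...     txn.put(b'greeting', b'hello')
        >>> with env.begin() as txn:
        ...     data = txn.get(b'greeting')
        >>> env.close()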
"""
def __init__(self, path, map_size=10485760, subdir=True,
readonly=False, metasync=True, sync=True, map_async=False,
mode=O_0755, create=True, readahead=True, writemap=False,
meminit=True, max_readers=126, max_dbs=0, max_spare_txns=1,
lock=True):
self._max_spare_txns = max_spare_txns
self._spare_txns = []
envpp = _ffi.new('MDB_env **')
rc = _lib.mdb_env_create(envpp)
if rc:
raise _error("mdb_env_create", rc)
self._env = envpp[0]
self._deps = set()
self._creating_db_in_readonly = False
self.set_mapsize(map_size)
rc = _lib.mdb_env_set_maxreaders(self._env, max_readers)
if rc:
raise _error("mdb_env_set_maxreaders", rc)
rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs)
if rc:
raise _error("mdb_env_set_maxdbs", rc)
if create and subdir and not readonly:
try:
os.mkdir(path, mode)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
flags = _lib.MDB_NOTLS
if not subdir:
flags |= _lib.MDB_NOSUBDIR
if readonly:
flags |= _lib.MDB_RDONLY
self.readonly = readonly
if not metasync:
flags |= _lib.MDB_NOMETASYNC
if not sync:
flags |= _lib.MDB_NOSYNC
if map_async:
flags |= _lib.MDB_MAPASYNC
if not readahead:
flags |= _lib.MDB_NORDAHEAD
if writemap:
flags |= _lib.MDB_WRITEMAP
if not meminit:
flags |= _lib.MDB_NOMEMINIT
if not lock:
flags |= _lib.MDB_NOLOCK
if isinstance(path, UnicodeType):
path = path.encode(sys.getfilesystemencoding())
rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111)
if rc:
raise _error(path, rc)
with self.begin(db=object()) as txn:
self._db = _Database(
env=self,
txn=txn,
name=None,
reverse_key=False,
dupsort=False,
create=True,
integerkey=False,
integerdup=False,
dupfixed=False
)
self._dbs = {None: self._db}
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def __del__(self):
self.close()
_env = None
_deps = None
_spare_txns = None
_dbs = None
def set_mapsize(self, map_size):
"""Change the maximum size of the map file. This function will fail if
any transactions are active in the current process.
`map_size`:
The new size in bytes.
Equivalent to `mdb_env_set_mapsize()
<http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_
Warning:
There's a data race in the underlying library that may cause
catastrophic loss of data if you use this method.
You are safe if one of the following are true:
* Only one process accessing a particular LMDB file ever calls
this method.
* You use locking external to this library to ensure that only one
process accessing the current LMDB file can be inside this function.
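        An illustrative sketch (assumes no transactions are active in this
        process, and that only this process ever resizes the map):
        ::
            >>> env.set_mapsize(2 * env.info()['map_size'])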
"""
rc = _lib.mdb_env_set_mapsize(self._env, map_size)
if rc:
raise _error("mdb_env_set_mapsize", rc)
def close(self):
"""Close the environment, invalidating any open iterators, cursors, and
transactions. Repeat calls to :py:meth:`close` have no effect.
Equivalent to `mdb_env_close()
<http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_
"""
if self._env:
if self._deps:
while self._deps:
self._deps.pop()._invalidate()
self._deps = None
if self._spare_txns:
while self._spare_txns:
_lib.mdb_txn_abort(self._spare_txns.pop())
self._spare_txns = None
if self._dbs:
self._dbs.clear()
self._dbs = None
self._db = None
_lib.mdb_env_close(self._env)
self._env = _invalid
def path(self):
"""Directory path or file name prefix where this environment is
stored.
Equivalent to `mdb_env_get_path()
<http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_
"""
path = _ffi.new('char **')
rc = _lib.mdb_env_get_path(self._env, path)
if rc:
raise _error("mdb_env_get_path", rc)
return _ffi.string(path[0]).decode(sys.getfilesystemencoding())
def copy(self, path, compact=False, txn=None):
"""Make a consistent copy of the environment in the given destination
directory.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: this parameter being non-None
is not available if the module was built with LMDB_PURE. Note:
this parameter may be set only if compact=True.
Equivalent to `mdb_env_copy2() or mdb_env_copy3()
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
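        An illustrative sketch (assumes ``/tmp/lmdb-backup`` is an existing,
        empty directory):
        ::
            >>> env.copy('/tmp/lmdb-backup', compact=True)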
"""
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
encoded = path.encode(sys.getfilesystemencoding())
if _have_patched_lmdb:
rc = _lib.mdb_env_copy3(self._env, encoded, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copy3", rc)
else:
rc = _lib.mdb_env_copy2(self._env, encoded, flags)
if rc:
raise _error("mdb_env_copy2", rc)
def copyfd(self, fd, compact=False, txn=None):
"""Copy a consistent version of the environment to file descriptor
`fd`.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: this parameter being non-None
is not available if the module was built with LMDB_PURE.
        Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3()
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if is_win32:
# Convert C library handle to kernel handle.
fd = msvcrt.get_osfhandle(fd)
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
if _have_patched_lmdb:
rc = _lib.mdb_env_copyfd3(self._env, fd, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copyfd3", rc)
else:
rc = _lib.mdb_env_copyfd2(self._env, fd, flags)
if rc:
raise _error("mdb_env_copyfd2", rc)
def sync(self, force=False):
"""Flush the data buffers to disk.
Equivalent to `mdb_env_sync()
<http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_
Data is always written to disk when :py:meth:`Transaction.commit` is
called, but the operating system may keep it buffered. MDB always
flushes the OS buffers upon commit as well, unless the environment was
opened with `sync=False` or `metasync=False`.
`force`:
If ``True``, force a synchronous flush. Otherwise if the
environment was opened with `sync=False` the flushes will be
omitted, and with `map_async=True` they will be asynchronous.
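        An illustrative sketch for an environment opened with `sync=False`,
        forcing a durable flush after a batch of commits:
        ::
            >>> env.sync(True)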
"""
rc = _lib.mdb_env_sync(self._env, force)
if rc:
raise _error("mdb_env_sync", rc)
def _convert_stat(self, st):
"""Convert a MDB_stat to a dict.
"""
return {
"psize": st.ms_psize,
"depth": st.ms_depth,
"branch_pages": st.ms_branch_pages,
"leaf_pages": st.ms_leaf_pages,
"overflow_pages": st.ms_overflow_pages,
"entries": st.ms_entries
}
def stat(self):
"""stat()
Return some environment statistics for the default database as a dict:
+--------------------+---------------------------------------+
| ``psize`` | Size of a database page in bytes. |
+--------------------+---------------------------------------+
| ``depth`` | Height of the B-tree. |
+--------------------+---------------------------------------+
| ``branch_pages`` | Number of internal (non-leaf) pages. |
+--------------------+---------------------------------------+
| ``leaf_pages`` | Number of leaf pages. |
+--------------------+---------------------------------------+
| ``overflow_pages`` | Number of overflow pages. |
+--------------------+---------------------------------------+
| ``entries`` | Number of data items. |
+--------------------+---------------------------------------+
Equivalent to `mdb_env_stat()
<http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_
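        An illustrative sketch of reading a single field:
        ::
            >>> n_entries = env.stat()['entries']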
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_env_stat(self._env, st)
if rc:
raise _error("mdb_env_stat", rc)
return self._convert_stat(st)
def info(self):
"""Return some nice environment information as a dict:
+--------------------+---------------------------------------------+
| ``map_addr`` | Address of database map in RAM. |
+--------------------+---------------------------------------------+
| ``map_size`` | Size of database map in RAM. |
+--------------------+---------------------------------------------+
| ``last_pgno`` | ID of last used page. |
+--------------------+---------------------------------------------+
| ``last_txnid`` | ID of last committed transaction. |
+--------------------+---------------------------------------------+
| ``max_readers`` | Number of reader slots allocated in the |
| | lock file. Equivalent to the value of |
| | `maxreaders=` specified by the first |
| | process opening the Environment. |
+--------------------+---------------------------------------------+
| ``num_readers`` | Maximum number of reader slots in |
| | simultaneous use since the lock file was |
| | initialized. |
+--------------------+---------------------------------------------+
Equivalent to `mdb_env_info()
<http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_
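        An illustrative sketch giving a rough estimate of how much of the
        `map_size=` limit is currently used (an approximation only):
        ::
            >>> info = env.info()
            >>> used = (info['last_pgno'] + 1) * env.stat()['psize']
            >>> free = info['map_size'] - used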
"""
info = _ffi.new('MDB_envinfo *')
rc = _lib.mdb_env_info(self._env, info)
if rc:
raise _error("mdb_env_info", rc)
return {
"map_addr": int(_ffi.cast('long', info.me_mapaddr)),
"map_size": info.me_mapsize,
"last_pgno": info.me_last_pgno,
"last_txnid": info.me_last_txnid,
"max_readers": info.me_maxreaders,
"num_readers": info.me_numreaders
}
def flags(self):
"""Return a dict describing Environment constructor flags used to
instantiate this environment."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_flags(self._env, flags_)
if rc:
raise _error("mdb_env_get_flags", rc)
flags = flags_[0]
return {
'subdir': not (flags & _lib.MDB_NOSUBDIR),
'readonly': bool(flags & _lib.MDB_RDONLY),
'metasync': not (flags & _lib.MDB_NOMETASYNC),
'sync': not (flags & _lib.MDB_NOSYNC),
'map_async': bool(flags & _lib.MDB_MAPASYNC),
'readahead': not (flags & _lib.MDB_NORDAHEAD),
'writemap': bool(flags & _lib.MDB_WRITEMAP),
'meminit': not (flags & _lib.MDB_NOMEMINIT),
'lock': not (flags & _lib.MDB_NOLOCK),
}
def max_key_size(self):
"""Return the maximum size in bytes of a record's key part. This
matches the ``MDB_MAXKEYSIZE`` constant set at compile time."""
return _lib.mdb_env_get_maxkeysize(self._env)
def max_readers(self):
"""Return the maximum number of readers specified during open of the
environment by the first process. This is the same as `max_readers=`
specified to the constructor if this process was the first to open the
environment."""
readers_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_maxreaders(self._env, readers_)
if rc:
raise _error("mdb_env_get_maxreaders", rc)
return readers_[0]
def readers(self):
"""Return a multi line Unicode string describing the current state of
the reader lock table."""
_callbacks.msg_func = []
try:
rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL)
if rc:
raise _error("mdb_reader_list", rc)
return UnicodeType().join(_callbacks.msg_func)
finally:
del _callbacks.msg_func
def reader_check(self):
"""Search the reader lock table for stale entries, for example due to a
crashed process. Returns the number of stale entries that were cleared.
"""
reaped = _ffi.new('int[]', 1)
rc = _lib.mdb_reader_check(self._env, reaped)
if rc:
raise _error('mdb_reader_check', rc)
return reaped[0]
def open_db(self, key=None, txn=None, reverse_key=False, dupsort=False,
create=True, integerkey=False, integerdup=False,
dupfixed=False):
"""
Open a database, returning an instance of :py:class:`_Database`. Repeat
:py:meth:`Environment.open_db` calls for the same name will return the
same handle. As a special case, the main database is always open.
Equivalent to `mdb_dbi_open()
<http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_
Named databases are implemented by *storing a special descriptor in the
main database*. All databases in an environment *share the same file*.
Because the descriptor is present in the main database, attempts to
create a named database will fail if a key matching the database's name
already exists. Furthermore *the key is visible to lookups and
enumerations*. If your main database keyspace conflicts with the names
you use for named databases, then move the contents of your main
database to another named database.
::
>>> env = lmdb.open('/tmp/test', max_dbs=2)
            >>> with env.begin(write=True) as txn:
            ...     txn.put(b'somename', b'somedata')
            >>> # Error: database cannot share name of existing key!
            >>> subdb = env.open_db(b'somename')
A newly created database will not exist if the transaction that created
it aborted, nor if another process deleted it. The handle resides in
the shared environment, it is not owned by the current transaction or
process. Only one thread should call this function; it is not
mutex-protected in a read-only transaction.
The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are
ignored if the database already exists. The state of those settings are
persistent and immutable per database. See :py:meth:`_Database.flags`
to view the state of those options for an opened database. A consequence
of the immutability of these flags is that the default non-named database
will never have these flags set.
Preexisting transactions, other than the current transaction and any
parents, must not use the new handle, nor must their children.
`key`:
Bytestring database name. If ``None``, indicates the main
database should be returned, otherwise indicates a named
database should be created inside the main database.
In other words, *a key representing the database will be
visible in the main database, and the database name cannot
conflict with any existing key.*
`txn`:
Transaction used to create the database if it does not exist.
            If unspecified, a temporary write transaction is used. Do not
call :py:meth:`open_db` from inside an existing transaction
without supplying it here. Note the passed transaction must
have `write=True`.
`reverse_key`:
If ``True``, keys are compared from right to left (e.g. DNS
names).
`dupsort`:
Duplicate keys may be used in the database. (Or, from another
perspective, keys may have multiple data items, stored in
sorted order.) By default keys must be unique and may have only
a single data item.
`create`:
If ``True``, create the database if it doesn't exist, otherwise
raise an exception.
`integerkey`:
If ``True``, indicates keys in the database are C unsigned
or ``size_t`` integers encoded in native byte order. Keys must
all be either unsigned or ``size_t``, they cannot be mixed in a
single database.
`integerdup`:
If ``True``, values in the
            database are C unsigned or ``size_t`` integers encoded in
native byte order. Implies `dupsort` and `dupfixed` are
``True``.
`dupfixed`:
If ``True``, values for each key
in database are of fixed size, allowing each additional
duplicate value for a key to be stored without a header
indicating its size. Implies `dupsort` is ``True``.
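        An illustrative sketch (assumes the environment was opened with
        `max_dbs=` greater than zero; the ``b'scores'`` name is arbitrary):
        ::
            >>> scores = env.open_db(b'scores', dupsort=True)
            >>> with env.begin(write=True, db=scores) as txn:
            ...     txn.put(b'alice', b'10')
            ...     txn.put(b'alice', b'20')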
"""
if isinstance(key, UnicodeType):
raise TypeError('key must be bytes')
if key is None and (reverse_key or dupsort or integerkey or integerdup
or dupfixed):
raise ValueError('May not set flags on the main database')
db = self._dbs.get(key)
if db:
return db
if integerdup:
dupfixed = True
if dupfixed:
dupsort = True
if txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
else:
try:
self._creating_db_in_readonly = True
with self.begin(write=not self.readonly) as txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
finally:
self._creating_db_in_readonly = False
self._dbs[key] = db
return db
def begin(self, db=None, parent=None, write=False, buffers=False):
"""Shortcut for :py:class:`lmdb.Transaction`"""
return Transaction(self, db, parent, write, buffers)
class _Database(object):
"""
Internal database handle. This class is opaque, save a single method.
Should not be constructed directly. Use :py:meth:`Environment.open_db`
instead.
"""
def __init__(self, env, txn, name, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed):
env._deps.add(self)
self._deps = set()
self._name = name
flags = 0
if reverse_key:
flags |= _lib.MDB_REVERSEKEY
if dupsort:
flags |= _lib.MDB_DUPSORT
if create:
flags |= _lib.MDB_CREATE
if integerkey:
flags |= _lib.MDB_INTEGERKEY
if integerdup:
flags |= _lib.MDB_INTEGERDUP
if dupfixed:
flags |= _lib.MDB_DUPFIXED
dbipp = _ffi.new('MDB_dbi *')
self._dbi = None
rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp)
if rc:
raise _error("mdb_dbi_open", rc)
self._dbi = dbipp[0]
self._load_flags(txn)
def _load_flags(self, txn):
"""Load MDB's notion of the database flags."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_)
if rc:
raise _error("mdb_dbi_flags", rc)
self._flags = flags_[0]
def flags(self, *args):
"""Return the database's associated flags as a dict of _Database
constructor kwargs."""
if len(args) > 1:
raise TypeError('flags takes 0 or 1 arguments')
return {
'reverse_key': bool(self._flags & _lib.MDB_REVERSEKEY),
'dupsort': bool(self._flags & _lib.MDB_DUPSORT),
'integerkey': bool(self._flags & _lib.MDB_INTEGERKEY),
'integerdup': bool(self._flags & _lib.MDB_INTEGERDUP),
'dupfixed': bool(self._flags & _lib.MDB_DUPFIXED),
}
def _invalidate(self):
self._dbi = _invalid
open = Environment
class Transaction(object):
"""
A transaction object. All operations require a transaction handle,
transactions may be read-only or read-write. Write transactions may not
span threads. Transaction objects implement the context manager protocol,
so that reliable release of the transaction happens even in the face of
unhandled exceptions:
.. code-block:: python
# Transaction aborts correctly:
with env.begin(write=True) as txn:
crash()
# Transaction commits automatically:
with env.begin(write=True) as txn:
txn.put('a', 'b')
Equivalent to `mdb_txn_begin()
<http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_
`env`:
Environment the transaction should be on.
`db`:
Default named database to operate on. If unspecified, defaults to
the environment's main database. Can be overridden on a per-call
basis below.
`parent`:
``None``, or a parent transaction (see lmdb.h).
`write`:
Transactions are read-only by default. To modify the database, you
must pass `write=True`. This flag is ignored if
:py:class:`Environment` was opened with ``readonly=True``.
`buffers`:
If ``True``, indicates :py:func:`buffer` objects should be yielded
instead of bytestrings. This setting applies to the
:py:class:`Transaction` instance itself and any :py:class:`Cursors
<Cursor>` created within the transaction.
This feature significantly improves performance, since MDB has a
zero-copy design, but it requires care when manipulating the
returned buffer objects. The benefit of this facility is diminished
when using small keys and values.
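    An illustrative sketch of a read-only transaction with `buffers=True`; the
    returned buffer refers to LMDB-owned memory and should be copied (for
    example with ``bytes()``) if it must outlive the transaction:
    ::
        >>> with env.begin(buffers=True) as txn:
        ...     buf = txn.get(b'greeting')
        ...     data = bytes(buf) if buf is not None else None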
"""
# If constructor fails, then __del__ will attempt to access these
# attributes.
_env = _invalid
_txn = _invalid
_parent = None
_write = False
# Mutations occurred since transaction start. Required to know when Cursor
# key/value must be refreshed.
_mutations = 0
def __init__(self, env, db=None, parent=None, write=False, buffers=False):
env._deps.add(self)
self.env = env # hold ref
self._db = db or env._db
self._env = env._env
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._to_py = _mvbuf if buffers else _mvstr
self._deps = set()
if parent:
self._parent = parent
parent_txn = parent._txn
parent._deps.add(self)
else:
parent_txn = _ffi.NULL
if write:
if env.readonly:
msg = 'Cannot start write transaction with read-only env'
raise _error(msg, _lib.EACCES)
txnpp = _ffi.new('MDB_txn **')
rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
self._write = True
else:
try: # Exception catch in order to avoid racy 'if txns:' test
if env._creating_db_in_readonly: # Don't use spare txns for creating a DB when read-only
raise IndexError
self._txn = env._spare_txns.pop()
env._max_spare_txns += 1
rc = _lib.mdb_txn_renew(self._txn)
if rc:
while self._deps:
self._deps.pop()._invalidate()
_lib.mdb_txn_abort(self._txn)
self._txn = _invalid
self._invalidate()
raise _error("mdb_txn_renew", rc)
except IndexError:
txnpp = _ffi.new('MDB_txn **')
flags = _lib.MDB_RDONLY
rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
def _invalidate(self):
if self._txn:
self.abort()
self.env._deps.discard(self)
self._parent = None
self._env = _invalid
def __del__(self):
self.abort()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.abort()
else:
self.commit()
def id(self):
"""id()
Return the transaction's ID.
This returns the identifier associated with this transaction. For a
read-only transaction, this corresponds to the snapshot being read;
concurrent readers will frequently have the same transaction ID.
"""
return _lib.mdb_txn_id(self._txn)
def stat(self, db):
"""stat(db)
Return statistics like :py:meth:`Environment.stat`, except for a single
DBI. `db` must be a database handle returned by :py:meth:`open_db`.
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_stat(self._txn, db._dbi, st)
if rc:
raise _error('mdb_stat', rc)
return self.env._convert_stat(st)
def drop(self, db, delete=True):
"""Delete all keys in a named database and optionally delete the named
database itself. Deleting the named database causes it to become
unavailable, and invalidates existing cursors.
Equivalent to `mdb_drop()
<http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_
"""
while db._deps:
db._deps.pop()._invalidate()
rc = _lib.mdb_drop(self._txn, db._dbi, delete)
self._mutations += 1
if rc:
raise _error("mdb_drop", rc)
if db._name in self.env._dbs:
del self.env._dbs[db._name]
def _cache_spare(self):
# In order to avoid taking and maintaining a lock, a race is allowed
# below which may result in more spare txns than desired. It seems
# unlikely the race could ever result in a large amount of spare txns,
# and in any case a correctly configured program should not be opening
# more read-only transactions than there are configured spares.
if self.env._max_spare_txns > 0:
_lib.mdb_txn_reset(self._txn)
self.env._spare_txns.append(self._txn)
self.env._max_spare_txns -= 1
self._txn = _invalid
self._invalidate()
return True
return False
def commit(self):
"""Commit the pending transaction.
Equivalent to `mdb_txn_commit()
<http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_
"""
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_commit(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_commit", rc)
self._invalidate()
def abort(self):
"""Abort the pending transaction. Repeat calls to :py:meth:`abort` have
no effect after a previously successful :py:meth:`commit` or
:py:meth:`abort`, or after the associated :py:class:`Environment` has
been closed.
Equivalent to `mdb_txn_abort()
<http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_
"""
if self._txn:
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_abort(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_abort", rc)
self._invalidate()
def get(self, key, default=None, db=None):
"""Fetch the first value matching `key`, returning `default` if `key`
does not exist. A cursor must be used to fetch all values for a key in
a `dupsort=True` database.
Equivalent to `mdb_get()
<http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_
"""
rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi,
key, len(key), self._val)
if rc:
if rc == _lib.MDB_NOTFOUND:
return default
raise _error("mdb_cursor_get", rc)
preload(self._val)
return self._to_py(self._val)
def put(self, key, value, dupdata=True, overwrite=True, append=False,
db=None):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`.
On success, the cursor is positioned on the new record.
Equivalent to `mdb_put()
<http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_
`key`:
Bytestring key to store.
`value`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite any existing matching key. If
False and writing to a dupsort=True database, this will not add a value
to the key and this function will return ``False``.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
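        An illustrative sketch (assumes a writable environment ``env`` whose
        main database is not `dupsort=True`):
        ::
            >>> with env.begin(write=True) as txn:
            ...     assert txn.put(b'counter', b'1')
            ...     assert not txn.put(b'counter', b'2', overwrite=False)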
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_put(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value), flags)
self._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_put", rc)
return True
def replace(self, key, value, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.replace`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.replace(key, value)
def pop(self, key, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.pop`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.pop(key)
def delete(self, key, value=EMPTY_BYTES, db=None):
"""Delete a key from the database.
Equivalent to `mdb_del()
<http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_
`key`:
The key to delete.
value:
If the database was opened with dupsort=True and value is not
the empty bytestring, then delete elements matching only this
`(key, value)` pair, otherwise all values for key are deleted.
Returns True if at least one key was deleted.
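        An illustrative sketch (assumes ``db`` is a `dupsort=True` database
        already holding several values for ``b'alice'``):
        ::
            >>> with env.begin(write=True, db=db) as txn:
            ...     txn.delete(b'alice', b'10')   # delete one duplicate
            ...     txn.delete(b'alice')          # delete all remaining values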
"""
if value is None: # for bug-compatibility with cpython impl
value = EMPTY_BYTES
rc = _lib.pymdb_del(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value))
self._mutations += 1
if rc:
if rc == _lib.MDB_NOTFOUND:
return False
raise _error("mdb_del", rc)
return True
def cursor(self, db=None):
"""Shortcut for ``lmdb.Cursor(db, self)``"""
return Cursor(db or self._db, self)
class Cursor(object):
"""
Structure for navigating a database.
Equivalent to `mdb_cursor_open()
<http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_
`db`:
:py:class:`_Database` to navigate.
`txn`:
:py:class:`Transaction` to navigate.
As a convenience, :py:meth:`Transaction.cursor` can be used to quickly
return a cursor:
::
>>> env = lmdb.open('/tmp/foo')
>>> child_db = env.open_db('child_db')
>>> with env.begin() as txn:
... cursor = txn.cursor() # Cursor on main database.
... cursor2 = txn.cursor(child_db) # Cursor on child database.
Cursors start in an unpositioned state. If :py:meth:`iternext` or
:py:meth:`iterprev` are used in this state, iteration proceeds from the
start or end respectively. Iterators directly position using the cursor,
meaning strange behavior results when multiple iterators exist on the same
cursor.
.. note::
From the perspective of the Python binding, cursors return to an
'unpositioned' state once any scanning or seeking method (e.g.
:py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns
``False`` or raises an exception. This is primarily to ensure safe,
consistent semantics in the face of any error condition.
When the Cursor returns to an unpositioned state, its :py:meth:`key`
and :py:meth:`value` return empty strings to indicate there is no
active position, although internally the LMDB cursor may still have a
valid position.
This may lead to slightly surprising behaviour when iterating the
values for a `dupsort=True` database's keys, since methods such as
:py:meth:`iternext_dup` will cause Cursor to appear unpositioned,
despite it returning ``False`` only to indicate there are no more
values for the current key. In that case, simply calling
:py:meth:`next` would cause iteration to resume at the next available
key.
This behaviour may change in future.
Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept
`keys` and `values` arguments. If both are ``True``, then the value of
:py:meth:`item` is yielded on each iteration. If only `keys` is ``True``,
:py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded.
Prior to iteration, a cursor can be positioned anywhere in the database:
::
>>> with env.begin() as txn:
... cursor = txn.cursor()
... if not cursor.set_range('5'): # Position at first key >= '5'.
... print('Not found!')
... else:
... for key, value in cursor: # Iterate from first key >= '5'.
... print((key, value))
Iteration is not required to navigate, and sometimes results in ugly or
inefficient code. In cases where the iteration order is not obvious, or is
related to the data being read, use of :py:meth:`set_key`,
:py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item`
may be preferable:
::
>>> # Record the path from a child to the root of a tree.
>>> path = ['child14123']
>>> while path[-1] != 'root':
... assert cursor.set_key(path[-1]), \\
... 'Tree is broken! Path: %s' % (path,)
... path.append(cursor.value())
"""
def __init__(self, db, txn):
db._deps.add(self)
txn._deps.add(self)
self.db = db # hold ref
self.txn = txn # hold ref
self._dbi = db._dbi
self._txn = txn._txn
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._valid = False
self._to_py = txn._to_py
curpp = _ffi.new('MDB_cursor **')
self._cur = None
rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp)
if rc:
raise _error("mdb_cursor_open", rc)
self._cur = curpp[0]
# If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to
# refresh `key' and `val'.
self._last_mutation = txn._mutations
def _invalidate(self):
if self._cur:
_lib.mdb_cursor_close(self._cur)
self.db._deps.discard(self)
self.txn._deps.discard(self)
self._cur = _invalid
self._dbi = _invalid
self._txn = _invalid
def __del__(self):
self._invalidate()
def close(self):
"""Close the cursor, freeing its associated resources."""
self._invalidate()
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self._invalidate()
def key(self):
"""Return the current key."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
return self._to_py(self._key)
def value(self):
"""Return the current value."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._val)
def item(self):
"""Return the current `(key, value)` pair."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._key), self._to_py(self._val)
def _iter(self, op, keys, values):
if not values:
get = self.key
elif not keys:
get = self.value
else:
get = self.item
cur = self._cur
key = self._key
val = self._val
rc = 0
while self._valid:
yield get()
rc = _lib.mdb_cursor_get(cur, key, val, op)
self._valid = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
raise _error("mdb_cursor_get", rc)
def iternext(self, keys=True, values=True):
"""Return a forward iterator that yields the current element before
calling :py:meth:`next`, repeating until the end of the database is
reached. As a convenience, :py:class:`Cursor` implements the iterator
protocol by automatically returning a forward iterator when invoked:
::
>>> # Equivalent:
>>> it = iter(cursor)
>>> it = cursor.iternext(keys=True, values=True)
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT, keys, values)
__iter__ = iternext
def iternext_dup(self, keys=False, values=True):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_dup`,
repeating until the last value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
.. code-block:: python
if not cursor.set_key("foo"):
print("No values found for 'foo'")
else:
for idx, data in enumerate(cursor.iternext_dup()):
print("%d'th value for 'foo': %s" % (idx, data))
"""
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
def iternext_nodup(self, keys=True, values=False):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_nodup`,
repeating until the end of the database is reached.
Only meaningful for databases opened with `dupsort=True`.
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
.. code-block:: python
for key in cursor.iternext_nodup():
print("Key '%s' has %d values" % (key, cursor.count()))
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
def iterprev(self, keys=True, values=True):
"""Return a reverse iterator that yields the current element before
calling :py:meth:`prev`, until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
::
>>> with env.begin() as txn:
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
... print('%dth last item is (%r, %r)' % (1+i, key, value))
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV, keys, values)
def iterprev_dup(self, keys=False, values=True):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_dup`,
repeating until the first value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
"""
return self._iter(_lib.MDB_PREV_DUP, keys, values)
def iterprev_nodup(self, keys=True, values=False):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
repeating until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
Only meaningful for databases opened with `dupsort=True`.
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
def _cursor_get(self, op):
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
self._valid = v = not rc
self._last_mutation = self.txn._mutations
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def _cursor_get_kv(self, op, k, v):
rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v),
self._key, self._val, op)
self._valid = v = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def first(self):
"""Move to the first key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the first value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST)
def first_dup(self):
"""Move to the first value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST_DUP)
def last(self):
"""Move to the last key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the last value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST)
def last_dup(self):
"""Move to the last value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST_DUP)
def prev(self):
"""Move to the previous element, returning ``True`` on success or
``False`` if there is no previous item.
For databases opened with `dupsort=True`, moves to the previous data
item ("duplicate") for the current key if one exists, otherwise moves
to the previous key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV)
def prev_dup(self):
"""Move to the previous value ("duplicate") of the current key,
returning ``True`` on success or ``False`` if there is no previous
value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_DUP)
def prev_nodup(self):
"""Move to the last value ("duplicate") of the previous key, returning
``True`` on success or ``False`` if there is no previous key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_NODUP)
def next(self):
"""Move to the next element, returning ``True`` on success or ``False``
if there is no next element.
For databases opened with `dupsort=True`, moves to the next value
("duplicate") for the current key if one exists, otherwise moves to the
first value of the next key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT)
def next_dup(self):
"""Move to the next value ("duplicate") of the current key, returning
``True`` on success or ``False`` if there is no next value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_DUP)
def next_nodup(self):
"""Move to the first value ("duplicate") of the next key, returning
``True`` on success or ``False`` if there is no next key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_NODUP)
def set_key(self, key):
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
the exact key was not found. It is an error to :py:meth:`set_key` the
empty bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_KEY
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
def set_key_dup(self, key, value):
"""Seek exactly to `(key, value)`, returning ``True`` on success or
``False`` if the exact key and value was not found. It is an error
to :py:meth:`set_key` the empty bytestring.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
def get(self, key, default=None):
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
returned when `key` is found, otherwise `default`.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
return self.value()
return default
def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False):
"""Returns an iterable of `(key, value)` 2-tuples containing results
for each key in the iterable `keys`.
`keys`:
Iterable to read keys from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, read
all duplicate values for each matching key.
`dupfixed_bytes`:
If database was opened with `dupsort=True` and `dupfixed=True`,
accepts the size of each value, in bytes, and applies an
optimization reducing the number of database lookups.
`keyfixed`:
If `dupfixed_bytes` is set and database key size is fixed,
setting keyfixed=True will result in this function returning
a memoryview to the results as a structured array of bytes.
The structured array can be instantiated by passing the
memoryview buffer to NumPy:
.. code-block:: python
key_bytes, val_bytes = 4, 8
                dtype = np.dtype([(f'S{key_bytes}', f'S{val_bytes}')])
arr = np.frombuffer(
cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True)
)
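        A plainer illustrative sketch without the fixed-size optimizations
        (assumes ``curs`` is a cursor on a `dupsort=True` database):
        ::
            >>> pairs = curs.getmulti([b'alice', b'bob'], dupdata=True)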
"""
        # Parameter validation. _error() needs both a message and an MDB
        # return code; EINVAL is used here as the closest matching code.
        if dupfixed_bytes and dupfixed_bytes < 0:
            raise _error("dupfixed_bytes must be a positive integer.", _lib.EINVAL)
        elif (dupfixed_bytes or keyfixed) and not dupdata:
            raise _error("dupdata is required for dupfixed_bytes/keyfixed.", _lib.EINVAL)
        elif keyfixed and not dupfixed_bytes:
            raise _error("dupfixed_bytes is required for keyfixed.", _lib.EINVAL)
if dupfixed_bytes:
get_op = _lib.MDB_GET_MULTIPLE
next_op = _lib.MDB_NEXT_MULTIPLE
else:
get_op = _lib.MDB_GET_CURRENT
next_op = _lib.MDB_NEXT_DUP
a = bytearray()
lst = list()
for key in keys:
if self.set_key(key):
while self._valid:
self._cursor_get(get_op)
preload(self._val)
key = self._to_py(self._key)
val = self._to_py(self._val)
if dupfixed_bytes:
gen = (
(key, val[i:i + dupfixed_bytes])
for i in range(0, len(val), dupfixed_bytes))
if keyfixed:
for k, v in gen:
a.extend(k + v)
else:
for k, v in gen:
lst.append((k, v))
else:
lst.append((key, val))
if dupdata:
self._cursor_get(next_op)
else:
break
if keyfixed:
return memoryview(a)
else:
return lst
def set_range(self, key):
"""Seek to the first key greater than or equal to `key`, returning
``True`` on success, or ``False`` to indicate key was past end of
database. Behaves like :py:meth:`first` if `key` is the empty
bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
if not key:
return self.first()
return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES)
def set_range_dup(self, key, value):
"""Seek to the first key/value pair greater than or equal to `key`,
returning ``True`` on success, or ``False`` to indicate that `value` was past the
        last value of `key` or that `(key, value)` was past the end of the database.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value)
# issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation,
# and fails to update `key` and `value` on success. Therefore
# explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE.
self._cursor_get(_lib.MDB_GET_CURRENT)
return rc
def delete(self, dupdata=False):
"""Delete the current element and move to the next, returning ``True``
on success or ``False`` if the database was empty.
If `dupdata` is ``True``, delete all values ("duplicates") for the
current key, otherwise delete only the currently positioned value. Only
meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_del()
<http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_
"""
v = self._valid
if v:
flags = _lib.MDB_NODUPDATA if dupdata else 0
rc = _lib.mdb_cursor_del(self._cur, flags)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
v = rc == 0
return v
def count(self):
"""Return the number of values ("duplicates") for the current key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_count()
<http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_
"""
countp = _ffi.new('size_t *')
rc = _lib.mdb_cursor_count(self._cur, countp)
if rc:
raise _error("mdb_cursor_count", rc)
return countp[0]
def put(self, key, val, dupdata=True, overwrite=True, append=False):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`. On
success, the cursor is positioned on the key.
Equivalent to `mdb_cursor_put()
<http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags)
self.txn._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return True
def putmulti(self, items, dupdata=True, overwrite=True, append=False):
"""Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the
        iterable `items`. Elements must be exactly 2-tuples; they may not be of
        any other type or tuple subclass.
Returns a tuple `(consumed, added)`, where `consumed` is the number of
elements read from the iterable, and `added` is the number of new
entries added to the database. `added` may be less than `consumed` when
`overwrite=False`.
`items`:
Iterable to read records from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, add
pair as a duplicate if the given key already exists. Otherwise
overwrite any existing matching key.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append records to the end of the database without
comparing their order first. Appending a key that is not
greater than the highest existing key will cause corruption.
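        An illustrative bulk-load sketch (assumes the target database is empty;
        the zero-padded keys keep the byte order ascending, as `append=True`
        requires):
        ::
            >>> items = [(b'key-%06d' % i, b'%d' % i) for i in range(1000)]
            >>> with env.begin(write=True) as txn:
            ...     with txn.cursor() as curs:
            ...         consumed, added = curs.putmulti(items, append=True)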
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
added = 0
skipped = 0
for key, value in items:
rc = _lib.pymdb_cursor_put(self._cur, key, len(key),
value, len(value), flags)
self.txn._mutations += 1
added += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
skipped += 1
else:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return added, added - skipped
def replace(self, key, val):
"""Store a record, returning its previous value if one existed. Returns
``None`` if no previous value existed. This uses the best available
mechanism to minimize the cost of a `set-and-return-previous`
operation.
For databases opened with `dupsort=True`, only the first data element
("duplicate") is returned if it existed, all data elements are removed
and the new `(key, data)` pair is inserted.
`key`:
Bytestring key to store.
        `val`:
Bytestring value to store.
"""
if self.db._flags & _lib.MDB_DUPSORT:
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
self.delete(True)
else:
old = None
self.put(key, val)
return old
flags = _lib.MDB_NOOVERWRITE
keylen = len(key)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags)
self.txn._mutations += 1
if not rc:
return
if rc != _lib.MDB_KEYEXIST:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
old = _mvstr(self._val)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
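    # Illustrative sketch of replace() (same assumed cursor `cur` as above):
    #
    #   cur.put(b'color', b'red')
    #   cur.replace(b'color', b'blue')   # returns b'red', stores b'blue'
    #   cur.replace(b'shape', b'round')  # returns None: no previous value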
def pop(self, key):
"""Fetch a record's value then delete it. Returns ``None`` if no
previous value existed. This uses the best available mechanism to
minimize the cost of a `delete-and-return-previous` operation.
For databases opened with `dupsort=True`, the first data element
("duplicate") for the key will be popped.
`key`:
Bytestring key to delete.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
rc = _lib.mdb_cursor_del(self._cur, 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
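    # Illustrative sketch of pop() (same assumed cursor `cur` as above):
    #
    #   cur.put(b'tmp', b'1')
    #   cur.pop(b'tmp')   # returns b'1' and deletes the record
    #   cur.pop(b'tmp')   # returns None: nothing left to delete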
def _iter_from(self, k, reverse):
"""Helper for centidb. Please do not rely on this interface, it may be
removed in future.
"""
if not k and not reverse:
found = self.first()
else:
found = self.set_range(k)
if reverse:
if not found:
self.last()
return self.iterprev()
else:
if not found:
return iter(())
return self.iternext()
| [((103, 13, 103, 30), 'threading.local', 'threading.local', ({}, {}), '()', False, 'import threading\n'), ((352, 7, 352, 27), 'lmdb._reading_docs', 'lmdb._reading_docs', ({}, {}), '()', False, 'import lmdb\n'), ((370, 11, 370, 21), 'cffi.FFI', 'cffi.FFI', ({}, {}), '()', False, 'import cffi\n'), ((513, 7, 513, 27), 'lmdb._reading_docs', 'lmdb._reading_docs', ({}, {}), '()', False, 'import lmdb\n'), ((516, 11, 516, 31), 'inspect.isclass', 'inspect.isclass', ({(516, 27, 516, 30): 'obj'}, {}), '(obj)', False, 'import inspect\n'), ((869, 43, 869, 70), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ({}, {}), '()', False, 'import sys\n'), ((898, 30, 898, 57), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ({}, {}), '()', False, 'import sys\n'), ((931, 17, 931, 41), 'msvcrt.get_osfhandle', 'msvcrt.get_osfhandle', ({(931, 38, 931, 40): 'fd'}, {}), '(fd)', False, 'import msvcrt\n'), ((745, 16, 745, 36), 'os.mkdir', 'os.mkdir', ({(745, 25, 745, 29): 'path', (745, 31, 745, 35): 'mode'}, {}), '(path, mode)', False, 'import os\n'), ((772, 31, 772, 58), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ({}, {}), '()', False, 'import sys\n')] |
AtomScott/image_folder_datasets | .virtual_documents/00_core.ipynb.py | 935580929abc9d8ec9eeaf944a0d3c670a09d04d | # default_exp core
#hide
from nbdev.showdoc import *
from fastcore.test import *
# export
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import warnings
import torchvision
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import classification, f1  # used in CNNModule.calculate_metrics
from pytorch_lightning.loggers import TensorBoardLogger
import fastai.vision.augment
import fastai.vision.data
# from fastai.vision.data import ImageDataLoaders
# from fastai.vision.augment import Resize
#export
class ImageFolderDataModule(pl.LightningDataModule):
def __init__(self, data_dir, batch_size, transform):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.transform = transform
# Compose([
# Resize(256, interpolation=2),
# CenterCrop(224),
# ToTensor(),
# # TODO: check whether normalize is the same for imagenet and fractalDB
# Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# ])
def prepare_data(self, stage=None):
pass
def setup(self, stage=None):
data_dir = self.data_dir
transform = self.transform
self.dls = fastai.vision.data.ImageDataLoaders.from_folder(data_dir, item_tfms=fastai.vision.augment.Resize(224))
self.trainset = ImageFolder(os.path.join(data_dir, 'train'), transform)
self.valset = ImageFolder(os.path.join(data_dir, 'valid'), transform)
def train_dataloader(self):
return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.valset, batch_size=self.batch_size, shuffle=False)
def test_dataloader(self):
pass
data_dir = 'Datasets/cifar10'
transform = Compose([
Resize(256, interpolation=2),
CenterCrop(224),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dm = ImageFolderDataModule(data_dir, 128, transform)
dm.setup()
for x,y in dm.train_dataloader():
test_eq(type(x), torch.Tensor)
test_eq(type(y), torch.Tensor)
break
#export
class CNNModule(pl.LightningModule):
def __init__(self, model=None, pretrained=False, freeze_extractor=False, log_level=10, num_classes=None, weight_path=None):
super().__init__()
self.num_classes = num_classes
self.pretrained = pretrained
self.freeze_extractor = freeze_extractor
assert model is not None, 'Select model from torchvision'
assert num_classes is not None, 'Must configure number of classes with num_classes'
if not model.startswith('resnet'):
warnings.warn('models other than resnet variants may need different setup for finetuning to work.')
# Prepare model for finetuning
if weight_path is not None:
param = torch.load(weight_path)
backbone = eval(f'torchvision.models.{model}(pretrained={False})')
backbone.load_state_dict(param)
else:
backbone = eval(f'torchvision.models.{model}(pretrained={pretrained})')
num_filters = backbone.fc.in_features
layers = list(backbone.children())[:-1]
self.feature_extractor = torch.nn.Sequential(*layers)
self.classifier = nn.Linear(num_filters, num_classes)
def forward(self, x):
if self.freeze_extractor:
self.feature_extractor.eval()
with torch.no_grad():
representations = self.feature_extractor(x).flatten(1)
else:
representations = self.feature_extractor(x).flatten(1)
y = self.classifier(representations)
return y
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def training_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/train", val, self.current_epoch)
avg_metrics[metric] = val
# epoch_dictionary = {'loss': avg_metrics['loss']}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
outputs = self.calculate_metrics(y_hat=y_hat, y=y)
return outputs
def validation_epoch_end(self, outputs):
avg_metrics = {}
for metric in outputs[0].keys():
val = torch.stack([x[metric] for x in outputs]).mean()
self.logger.experiment.add_scalar(f"{metric}/validation", val, self.current_epoch)
avg_metrics[metric] = val
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=0.02, weight_decay=1e-04)
        # Alternative: return torch.optim.SGD(self.parameters(), lr=..., momentum=...)
def calculate_metrics(self, y, y_hat):
loss = F.cross_entropy(y_hat, y)
y_pred = y_hat.argmax(dim=1)
acc = classification.accuracy(y_pred, y)
f1_score = f1(y_pred, y, self.num_classes)
return {
"loss":loss,
"acc": acc,
"f1": f1_score
}
def on_sanity_check_start(self):
self.logger.disable()
def on_sanity_check_end(self):
self.logger.enable()
modelname = 'resnet18'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes))
test_eq(trainer.fit(model, dm), 1)
weight_path = 'FractalDB-1000_resnet50_epoch90.pth'
modelname = 'resnet50'
logger = TensorBoardLogger('tb_logs', name=modelname)
trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5)
model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path)
test_eq(trainer.fit(model, dm), 1)
| [((183, 9, 183, 53), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', (), '', False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((184, 10, 184, 86), 'pytorch_lightning.Trainer', 'pl.Trainer', (), '', True, 'import pytorch_lightning as pl\n'), ((191, 9, 191, 53), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', (), '', False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((192, 10, 192, 86), 'pytorch_lightning.Trainer', 'pl.Trainer', (), '', True, 'import pytorch_lightning as pl\n'), ((59, 15, 59, 82), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((62, 15, 62, 81), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((71, 8, 71, 36), 'torchvision.transforms.Resize', 'Resize', (), '', False, 'from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize\n'), ((72, 8, 72, 23), 'torchvision.transforms.CenterCrop', 'CenterCrop', ({(72, 19, 72, 22): '224'}, {}), '(224)', False, 'from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize\n'), ((73, 8, 73, 18), 'torchvision.transforms.ToTensor', 'ToTensor', ({}, {}), '()', False, 'from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize\n'), ((74, 8, 75, 51), 'torchvision.transforms.Normalize', 'Normalize', (), '', False, 'from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize\n'), ((114, 33, 114, 61), 'torch.nn.Sequential', 'torch.nn.Sequential', ({(114, 53, 114, 60): '*layers'}, {}), '(*layers)', False, 'import torch\n'), ((115, 26, 115, 61), 'torch.nn.Linear', 'nn.Linear', ({(115, 36, 115, 47): 'num_filters', (115, 49, 115, 60): 'num_classes'}, {}), '(num_filters, num_classes)', False, 'from torch import nn\n'), ((164, 15, 164, 40), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', ({(164, 31, 164, 36): 'y_hat', (164, 38, 164, 39): 'y'}, {}), '(y_hat, y)', True, 'from torch.nn import functional as F\n'), ((55, 36, 55, 67), 'os.path.join', 'os.path.join', ({(55, 49, 55, 57): 'data_dir', (55, 59, 55, 66): '"""train"""'}, {}), "(data_dir, 'train')", False, 'import os\n'), ((56, 34, 56, 65), 'os.path.join', 'os.path.join', ({(56, 47, 56, 55): 'data_dir', (56, 57, 56, 64): '"""valid"""'}, {}), "(data_dir, 'valid')", False, 'import os\n'), ((101, 12, 101, 111), 'warnings.warn', 'warnings.warn', ({(101, 26, 101, 110): '"""models other than resnet variants may need different setup for finetuning to work."""'}, {}), "(\n 'models other than resnet variants may need different setup for finetuning to work.'\n )", False, 'import warnings\n'), ((105, 20, 105, 43), 'torch.load', 'torch.load', ({(105, 31, 105, 42): 'weight_path'}, {}), '(weight_path)', False, 'import torch\n'), ((120, 17, 120, 32), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((138, 18, 138, 59), 'torch.stack', 'torch.stack', ({(138, 30, 138, 58): '[x[metric] for x in outputs]'}, {}), '([x[metric] for x in outputs])', False, 'import torch\n'), ((154, 18, 154, 59), 'torch.stack', 'torch.stack', ({(154, 30, 154, 58): '[x[metric] for x in outputs]'}, {}), '([x[metric] for x in outputs])', False, 'import torch\n')] |
pgorecki/python-ddd | src/modules/iam/module.py | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | from seedwork.application.modules import BusinessModule
from modules.iam.application.services import AuthenticationService
class IdentityAndAccessModule(BusinessModule):
def __init__(self, authentication_service: AuthenticationService):
self.authentication_service = authentication_service
# @staticmethod
# def create(container):
# assert False
# """Factory method for creating a module by using dependencies from a DI container"""
# return IdentityAndAccessModule(
# logger=container.logger(),
# authentication_service=container.authentication_service(),
# )
| [] |
ufora/ufora | test_scripts/pyfora2/containerTests.py | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
class ExecutorSimulationTest(
unittest.TestCase,
ExecutorTestCommon.ExecutorTestCommon,
DictTestCases.DictTestCases,
ListTestCases.ListTestCases,
TupleTestCases.TupleTestCases):
@classmethod
def setUpClass(cls):
cls.config = Setup.config()
cls.executor = None
cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
cls.simulation.startService()
cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)
@classmethod
def tearDownClass(cls):
cls.simulation.stopService()
@classmethod
def create_executor(cls, allowCached=True):
if not allowCached:
return pyfora.connect('http://localhost:30000')
if cls.executor is None:
cls.executor = pyfora.connect('http://localhost:30000')
cls.executor.stayOpenOnExit = True
return cls.executor
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
| [((58, 4, 58, 31), 'ufora.config.Mainline.UnitTestMainline', 'Mainline.UnitTestMainline', ({}, {}), '()', True, 'import ufora.config.Mainline as Mainline\n'), ((35, 21, 35, 35), 'ufora.config.Setup.config', 'Setup.config', ({}, {}), '()', True, 'import ufora.config.Setup as Setup\n'), ((37, 25, 37, 76), 'ufora.test.ClusterSimulation.Simulator.createGlobalSimulator', 'ClusterSimulation.Simulator.createGlobalSimulator', ({}, {}), '()', True, 'import ufora.test.ClusterSimulation as ClusterSimulation\n'), ((48, 19, 48, 59), 'pyfora.connect', 'pyfora.connect', ({(48, 34, 48, 58): '"""http://localhost:30000"""'}, {}), "('http://localhost:30000')", False, 'import pyfora\n'), ((51, 27, 51, 67), 'pyfora.connect', 'pyfora.connect', ({(51, 42, 51, 66): '"""http://localhost:30000"""'}, {}), "('http://localhost:30000')", False, 'import pyfora\n')] |
quochungto/SIIM-COVID19-Detection | src/utils/torch_common.py | 88bc10d7b01d277d223c4dddd4c223a782616611 | import os
import gc
import random
import numpy as np
import torch
def seed_everything(seed):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def memory_cleanup():
"""
Cleans up GPU memory
https://github.com/huggingface/transformers/issues/1742
"""
for obj in gc.get_objects():
if torch.is_tensor(obj):
del obj
gc.collect()
torch.cuda.empty_cache()
| [((9, 4, 9, 21), 'random.seed', 'random.seed', ({(9, 16, 9, 20): 'seed'}, {}), '(seed)', False, 'import random\n'), ((10, 4, 10, 24), 'numpy.random.seed', 'np.random.seed', ({(10, 19, 10, 23): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((11, 4, 11, 27), 'torch.manual_seed', 'torch.manual_seed', ({(11, 22, 11, 26): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((12, 4, 12, 32), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(12, 27, 12, 31): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((22, 15, 22, 31), 'gc.get_objects', 'gc.get_objects', ({}, {}), '()', False, 'import gc\n'), ((26, 4, 26, 16), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((27, 4, 27, 28), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ({}, {}), '()', False, 'import torch\n'), ((23, 11, 23, 31), 'torch.is_tensor', 'torch.is_tensor', ({(23, 27, 23, 30): 'obj'}, {}), '(obj)', False, 'import torch\n')] |
marsven/conan-center-index | recipes/freeimage/all/conanfile.py | d8bb4ad617cee02d8664e8341fa32cdf702e4284 | from conans import ConanFile, CMake, tools
import os
import shutil
required_conan_version = ">=1.43.0"
class FreeImageConan(ConanFile):
name = "freeimage"
description = "Open Source library project for developers who would like to support popular graphics image formats"\
"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications."
homepage = "https://freeimage.sourceforge.io"
url = "https://github.com/conan-io/conan-center-index"
license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
topics = ("freeimage", "image", "decoding", "graphics")
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
"with_png": [True, False],
"with_tiff": [True, False],
"with_jpeg2000": [True, False],
"with_openexr": [True, False],
"with_eigen": [True, False],
"with_webp": [True, False],
"with_raw": [True, False],
"with_jxr": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_jpeg": "libjpeg",
"with_png": True,
"with_tiff": True,
"with_jpeg2000": True,
"with_openexr": True,
"with_eigen": True,
"with_webp": True,
"with_raw": True,
"with_jxr": True,
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
tools.check_min_cppstd(self, "11")
if self.options.shared:
del self.options.fPIC
self.output.warn("G3 plugin and JPEGTransform are disabled.")
if self.options.with_jpeg is not None:
if self.options.with_tiff:
self.options["libtiff"].jpeg = self.options.with_jpeg
def requirements(self):
self.requires("zlib/1.2.11")
if self.options.with_jpeg == "libjpeg":
self.requires("libjpeg/9d")
elif self.options.with_jpeg == "libjpeg-turbo":
self.requires("libjpeg-turbo/2.1.2")
if self.options.with_jpeg2000:
self.requires("openjpeg/2.4.0")
if self.options.with_png:
self.requires("libpng/1.6.37")
if self.options.with_webp:
self.requires("libwebp/1.2.2")
if self.options.with_openexr:
self.requires("openexr/2.5.7")
if self.options.with_raw:
self.requires("libraw/0.20.2")
if self.options.with_jxr:
self.requires("jxrlib/cci.20170615")
if self.options.with_tiff:
self.requires("libtiff/4.3.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False
self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000
self._cmake.definitions["WITH_PNG"] = self.options.with_png
self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
self._cmake.definitions["WITH_RAW"] = self.options.with_raw
self._cmake.definitions["WITH_JXR"] = self.options.with_jxr
self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def build(self):
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibOpenJPEG"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite"))
tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR"))
for patch in self.conan_data.get("patches", {}).get(self.version, {}):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder)
self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder)
def package_info(self):
def imageformats_deps():
components = []
components.append("zlib::zlib")
if self.options.with_jpeg:
components.append("{0}::{0}".format(self.options.with_jpeg))
if self.options.with_jpeg2000:
components.append("openjpeg::openjpeg")
if self.options.with_png:
components.append("libpng::libpng")
if self.options.with_webp:
components.append("libwebp::libwebp")
if self.options.with_openexr:
components.append("openexr::openexr")
if self.options.with_raw:
components.append("libraw::libraw")
if self.options.with_jxr:
components.append("jxrlib::jxrlib")
if self.options.with_tiff:
components.append("libtiff::libtiff")
return components
self.cpp_info.names["pkg_config"] = "freeimage"
self.cpp_info.names["cmake_find_package"] = "FreeImage"
self.cpp_info.names["cmake_find_package_multi"] = "FreeImage"
self.cpp_info.components["FreeImage"].libs = ["freeimage"]
self.cpp_info.components["FreeImage"].requires = imageformats_deps()
self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]
if not self.options.shared:
self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
| [((65, 8, 65, 42), 'conans.tools.check_min_cppstd', 'tools.check_min_cppstd', ({(65, 31, 65, 35): 'self', (65, 37, 65, 41): '"""11"""'}, {}), "(self, '11')", False, 'from conans import ConanFile, CMake, tools\n'), ((95, 8, 96, 70), 'conans.tools.get', 'tools.get', (), '', False, 'from conans import ConanFile, CMake, tools\n'), ((101, 22, 101, 33), 'conans.CMake', 'CMake', ({(101, 28, 101, 32): 'self'}, {}), '(self)', False, 'from conans import ConanFile, CMake, tools\n'), ((114, 20, 114, 76), 'os.path.join', 'os.path.join', ({(114, 33, 114, 55): 'self._source_subfolder', (114, 57, 114, 65): '"""Source"""', (114, 67, 114, 75): '"""LibPNG"""'}, {}), "(self._source_subfolder, 'Source', 'LibPNG')", False, 'import os\n'), ((115, 20, 115, 78), 'os.path.join', 'os.path.join', ({(115, 33, 115, 55): 'self._source_subfolder', (115, 57, 115, 65): '"""Source"""', (115, 67, 115, 77): '"""LibTIFF4"""'}, {}), "(self._source_subfolder, 'Source', 'LibTIFF4')", False, 'import os\n'), ((116, 20, 116, 81), 'os.path.join', 'os.path.join', ({(116, 33, 116, 55): 'self._source_subfolder', (116, 57, 116, 65): '"""Source"""', (116, 67, 116, 80): '"""LibOpenJPEG"""'}, {}), "(self._source_subfolder, 'Source', 'LibOpenJPEG')", False, 'import os\n'), ((117, 20, 117, 76), 'os.path.join', 'os.path.join', ({(117, 33, 117, 55): 'self._source_subfolder', (117, 57, 117, 65): '"""Source"""', (117, 67, 117, 75): '"""LibJXR"""'}, {}), "(self._source_subfolder, 'Source', 'LibJXR')", False, 'import os\n'), ((118, 20, 118, 77), 'os.path.join', 'os.path.join', ({(118, 33, 118, 55): 'self._source_subfolder', (118, 57, 118, 65): '"""Source"""', (118, 67, 118, 76): '"""LibWebP"""'}, {}), "(self._source_subfolder, 'Source', 'LibWebP')", False, 'import os\n'), ((119, 20, 119, 80), 'os.path.join', 'os.path.join', ({(119, 33, 119, 55): 'self._source_subfolder', (119, 57, 119, 65): '"""Source"""', (119, 67, 119, 79): '"""LibRawLite"""'}, {}), "(self._source_subfolder, 'Source', 'LibRawLite')", False, 'import os\n'), ((120, 20, 120, 77), 'os.path.join', 'os.path.join', ({(120, 33, 120, 55): 'self._source_subfolder', (120, 57, 120, 65): '"""Source"""', (120, 67, 120, 76): '"""OpenEXR"""'}, {}), "(self._source_subfolder, 'Source', 'OpenEXR')", False, 'import os\n'), ((123, 12, 123, 32), 'conans.tools.patch', 'tools.patch', ({}, {}), '(**patch)', False, 'from conans import ConanFile, CMake, tools\n')] |
myelin/appengine-python-standard | src/google/appengine/datastore/datastore_query.py | 2a99acd114f7cdd66fbad9bfd185384eef847c84 | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thin wrapper around datastore query RPC calls.
This provides wrappers around the internal only datastore_pb library and is
designed to be the lowest-level API to be used by all Python datastore client
libraries for executing queries. It provides a layer of protection so the actual
RPC syntax can change without affecting client libraries.
Any class, function, field or argument starting with an '_' is for INTERNAL use
only and should not be used by developers!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import collections
import functools
import pickle
import six
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_rpc
from google.protobuf import message
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
__all__ = ['Batch',
'Batcher',
'CompositeFilter',
'CompositeOrder',
'CorrelationFilter',
'Cursor',
'FetchOptions',
'FilterPredicate',
'Order',
'PropertyFilter',
'PropertyOrder',
'Query',
'QueryOptions',
'ResultsIterator',
'make_filter',
'apply_query',
'inject_results']
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
class _BaseComponent(object):
"""A base class for query components.
Currently just implements basic == and != functions.
"""
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self is other or self.__dict__ == other.__dict__
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return equal
return not equal
def make_filter(name, op, values):
"""Constructs a FilterPredicate from the given name, op and values.
Args:
name: A non-empty string, the name of the property to filter.
op: One of PropertyFilter._OPERATORS.keys(), the operator to use.
values: A supported value, the value to compare against.
Returns:
if values is a list, a CompositeFilter that uses AND to combine all
values, otherwise a PropertyFilter for the single value.
Raises:
datastore_errors.BadPropertyError: if the property name is invalid.
datastore_errors.BadValueError: if the property did not validate correctly
or the value was an empty list.
Other exception types (like OverflowError): if the property value does not
meet type-specific criteria.
"""
datastore_types.ValidateProperty(name, values)
properties = datastore_types.ToPropertyPb(name, values)
if isinstance(properties, list):
filters = [PropertyFilter(op, prop) for prop in properties]
return CompositeFilter(CompositeFilter.AND, filters)
else:
return PropertyFilter(op, properties)
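# Illustrative sketch of make_filter() (property names and values are made-up
# examples):
#
#   single = make_filter('height', '>=', 42)       # -> PropertyFilter
#   multi = make_filter('tag', '=', ['a', 'b'])    # -> CompositeFilter (AND)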
def _make_key_value_map(entity, property_names):
"""Extracts key values from the given entity.
Args:
entity: The entity_pb2.EntityProto to extract values from.
property_names: The names of the properties from which to extract values.
Returns:
A dict mapping property names to a lists of key values.
"""
value_map = dict((six.ensure_text(name), []) for name in property_names)
for prop in entity.property:
prop_name = six.ensure_text(prop.name)
if prop_name in value_map:
value_map[prop_name].append(
datastore_types.PropertyValueToKeyValue(prop.value))
key_prop = six.ensure_text(datastore_types.KEY_SPECIAL_PROPERTY)
if key_prop in value_map:
value_map[key_prop] = [datastore_types.ReferenceToKeyValue(entity.key)]
return value_map
class _PropertyComponent(_BaseComponent):
"""A component that operates on a specific set of properties."""
def _get_prop_names(self):
"""Returns a set of property names used by the filter."""
raise NotImplementedError
class FilterPredicate(_PropertyComponent):
"""An abstract base class for all query filters.
All sub-classes must be immutable as these are often stored without creating a
defensive copy.
"""
def __call__(self, entity):
"""Applies the filter predicate to the given entity.
Args:
entity: the datastore_pb.EntityProto to test.
Returns:
True if the given entity matches the filter, False otherwise.
"""
return self._apply(_make_key_value_map(entity, self._get_prop_names()))
def _apply(self, key_value_map):
"""Apply the given component to the comparable value map.
A filter matches a list of values if at least one value in the list
matches the filter, for example:
'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3'
Note: the values are actually represented as tuples whose first item
encodes the type; see datastore_types.PropertyValueToKeyValue().
Args:
key_value_map: A dict mapping property names to a list of
comparable values.
Return:
A boolean indicating if the given map matches the filter.
"""
raise NotImplementedError
def _prune(self, key_value_map):
"""Removes values from the given map that do not match the filter.
When doing a scan in the datastore, only index values that match the filters
are seen. When multiple values that point to the same entity are seen, the
entity only appears where the first value is found. This function removes
all values that don't match the query so that the first value in the map
is the same one the datastore would see first.
Args:
key_value_map: the comparable value map from which to remove
values. Does not need to contain values for all filtered properties.
Returns:
A value that evaluates to False if every value in a single list was
completely removed. This effectively applies the filter but is less
efficient than _apply().
"""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a pb."""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
def _to_pbs(self):
"""Internal only function to generate a list of pbs."""
return [self._to_pb()]
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError(
'This filter only supports in memory operations (%r)' % self)
class _SinglePropertyFilter(FilterPredicate):
"""Base class for a filter that operates on a single property."""
def _get_prop_name(self):
"""Returns the name of the property being filtered."""
raise NotImplementedError
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
raise NotImplementedError
def _get_prop_names(self):
return set([self._get_prop_name()])
def _apply(self, value_map):
for other_value in value_map[self._get_prop_name()]:
if self._apply_to_value(other_value):
return True
return False
def _prune(self, value_map):
if self._get_prop_name() not in value_map:
return True
values = [value for value in value_map[self._get_prop_name()]
if self._apply_to_value(value)]
value_map[self._get_prop_name()] = values
return bool(values)
class PropertyFilter(_SinglePropertyFilter):
"""An immutable filter predicate that constrains a single property."""
_OPERATORS = {
'<': datastore_pb.Query.Filter.LESS_THAN,
'<=': datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
'>': datastore_pb.Query.Filter.GREATER_THAN,
'>=': datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
'=': datastore_pb.Query.Filter.EQUAL,
}
_OPERATORS_INVERSE = dict((value, key)
for key, value in _OPERATORS.items())
_OPERATORS_TO_PYTHON_OPERATOR = {
datastore_pb.Query.Filter.LESS_THAN: '<',
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL: '<=',
datastore_pb.Query.Filter.GREATER_THAN: '>',
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL: '>=',
datastore_pb.Query.Filter.EQUAL: '==',
}
_INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>='])
_INEQUALITY_OPERATORS_ENUM = frozenset([
datastore_pb.Query.Filter.LESS_THAN,
datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query.Filter.GREATER_THAN,
datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL,
])
_UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
def __init__(self, op, value):
"""Constructor.
Args:
op: A string representing the operator to use.
value: A entity_pb2.Property, the property and value to compare against.
Raises:
datastore_errors.BadArgumentError if op has an unsupported value or value
is not an entity_pb2.Property.
"""
if op not in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,))
if not isinstance(value, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'value argument should be entity_pb2.Property (%r)' % (value,))
super(PropertyFilter, self).__init__()
self._filter = datastore_pb.Query.Filter()
self._filter.op = self._OPERATORS[op]
self._filter.property.add().CopyFrom(value)
@property
def op(self):
raw_op = self._filter.op
return self._OPERATORS_INVERSE.get(raw_op, str(raw_op))
@property
def value(self):
return self._filter.property[0]
def __repr__(self):
prop = self.value
name = prop.name
value = datastore_types.FromPropertyPb(prop)
if six.PY2 and isinstance(value, long):
value = int(value)
return '%s(%r, <%r, %r>)' % (self.__class__.__name__, six.ensure_str(
self.op), six.ensure_str(name), value)
def _get_prop_name(self):
return self._filter.property[0].name
def _apply_to_value(self, value):
if not hasattr(self, '_cmp_value'):
if self._filter.op == datastore_pb.Query.Filter.EXISTS:
return True
self._cmp_value = datastore_types.PropertyValueToKeyValue(
self._filter.property[0].value)
self._condition = ('value %s self._cmp_value' %
self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op])
return eval(self._condition)
def _has_inequality(self):
"""Returns True if the filter predicate contains inequalities filters."""
return self._filter.op in self._INEQUALITY_OPERATORS_ENUM
@classmethod
def _from_pb(cls, filter_pb):
self = cls.__new__(cls)
self._filter = filter_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self._filter
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
filter_pb = googledatastore.Filter()
prop_filter_pb = filter_pb.property_filter
adapter.get_query_converter()._v3_filter_to_v1_property_filter(
self._filter, prop_filter_pb)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyFilter is unsupported.')
def __eq__(self, other):
if self.__class__ is not other.__class__:
if other.__class__ is _PropertyRangeFilter:
return [self._filter] == other._to_pbs()
return NotImplemented
return self._filter == other._filter
class _PropertyRangeFilter(_SinglePropertyFilter):
"""A filter predicate that represents a range of values.
Since we allow multi-valued properties there is a large difference between
"x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the
first but not the second.
Since the datastore only allows a single inequality filter, multiple
  inequality filters are merged into a single range filter in the
datastore (unlike equality filters). This class is used by
datastore_query.CompositeFilter to implement the same logic.
"""
_start_key_value = None
_end_key_value = None
@datastore_rpc._positional(1)
def __init__(self, start=None, start_incl=True, end=None, end_incl=True):
"""Constructs a range filter using start and end properties.
Args:
start: A entity_pb2.Property to use as a lower bound or None to indicate
no lower bound.
start_incl: A boolean that indicates if the lower bound is inclusive.
end: A entity_pb2.Property to use as an upper bound or None to indicate
no upper bound.
end_incl: A boolean that indicates if the upper bound is inclusive.
"""
if start is not None and not isinstance(start, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
'start argument should be entity_pb2.Property (%r)' % (start,))
if end is not None and not isinstance(end, entity_pb2.Property):
raise datastore_errors.BadArgumentError(
          'end argument should be entity_pb2.Property (%r)' % (end,))
if start and end and start.name != end.name:
raise datastore_errors.BadArgumentError(
'start and end arguments must be on the same property (%s != %s)' %
(start.name, end.name))
if not start and not end:
raise datastore_errors.BadArgumentError(
'Unbounded ranges are not supported.')
super(_PropertyRangeFilter, self).__init__()
self._start = start
self._start_incl = start_incl
self._end = end
self._end_incl = end_incl
@classmethod
def from_property_filter(cls, prop_filter):
op = prop_filter._filter.op
if op == datastore_pb.Query.Filter.GREATER_THAN:
return cls(start=prop_filter._filter.property[0], start_incl=False)
elif op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL:
return cls(start=prop_filter._filter.property[0])
elif op == datastore_pb.Query.Filter.LESS_THAN:
return cls(end=prop_filter._filter.property[0], end_incl=False)
elif op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL:
return cls(end=prop_filter._filter.property[0])
else:
raise datastore_errors.BadArgumentError(
'Unsupported operator (%s)' % (op,))
def intersect(self, other):
"""Returns a filter representing the intersection of self and other."""
if isinstance(other, PropertyFilter):
other = self.from_property_filter(other)
elif not isinstance(other, _PropertyRangeFilter):
raise datastore_errors.BadArgumentError(
'other argument should be a _PropertyRangeFilter (%r)' % (other,))
if other._get_prop_name() != self._get_prop_name():
raise datastore_errors.BadArgumentError(
'other argument must be on the same property (%s != %s)' %
(other._get_prop_name(), self._get_prop_name()))
start_source = None
if other._start:
if self._start:
result = cmp_compat.cmp(
self._get_start_key_value(), other._get_start_key_value())
if result == 0:
result = cmp_compat.cmp(other._start_incl, self._start_incl)
if result > 0:
start_source = self
elif result < 0:
start_source = other
else:
start_source = other
elif self._start:
start_source = self
end_source = None
if other._end:
if self._end:
result = cmp_compat.cmp(
self._get_end_key_value(), other._get_end_key_value())
if result == 0:
result = cmp_compat.cmp(self._end_incl, other._end_incl)
if result < 0:
end_source = self
elif result > 0:
end_source = other
else:
end_source = other
elif self._end:
end_source = self
if start_source:
if end_source in (start_source, None):
return start_source
result = _PropertyRangeFilter(start=start_source._start,
start_incl=start_source._start_incl,
end=end_source._end,
end_incl=end_source._end_incl)
result._start_key_value = start_source._start_key_value
result._end_key_value = end_source._end_key_value
return result
else:
return end_source or self
def _get_start_key_value(self):
if self._start_key_value is None:
self._start_key_value = datastore_types.PropertyValueToKeyValue(
self._start.value)
return self._start_key_value
def _get_end_key_value(self):
if self._end_key_value is None:
self._end_key_value = datastore_types.PropertyValueToKeyValue(
self._end.value)
return self._end_key_value
def _apply_to_value(self, value):
"""Apply the filter to the given value.
Args:
value: The comparable value to check.
Returns:
A boolean indicating if the given value matches the filter.
"""
if self._start:
result = cmp_compat.cmp(self._get_start_key_value(), value)
if result > 0 or (result == 0 and not self._start_incl):
return False
if self._end:
result = cmp_compat.cmp(self._get_end_key_value(), value)
if result < 0 or (result == 0 and not self._end_incl):
return False
return True
def _get_prop_name(self):
if self._start:
return self._start.name
if self._end:
return self._end.name
assert False
def _to_pbs(self):
pbs = []
if self._start:
if self._start_incl:
op = datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.GREATER_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._start)
pbs.append(pb)
if self._end:
if self._end_incl:
op = datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL
else:
op = datastore_pb.Query.Filter.LESS_THAN
pb = datastore_pb.Query.Filter()
pb.op = op
pb.property.add().CopyFrom(self._end)
pbs.append(pb)
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter representation of the filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
filter_pb = googledatastore.Filter()
composite_filter = filter_pb.composite_filter
composite_filter.op = googledatastore.CompositeFilter.AND
if self._start:
if self._start_incl:
op = googledatastore.PropertyFilter.GREATER_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.GREATER_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._start.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._start, True, pb.value)
if self._end:
if self._end_incl:
op = googledatastore.PropertyFilter.LESS_THAN_OR_EQUAL
else:
op = googledatastore.PropertyFilter.LESS_THAN
pb = composite_filter.filters.add().property_filter
pb.op = op
pb.property.name = self._end.name
adapter.get_entity_converter().v3_property_to_v1_value(
self._end, True, pb.value)
return filter_pb
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return (self._start == other._start and
self._end == other._end and
(self._start_incl == other._start_incl or self._start is None) and
(self._end_incl == other._end_incl or self._end is None))
class _PropertyExistsFilter(FilterPredicate):
"""A FilterPredicate that matches entities containing specific properties.
Only works as an in-memory filter. Used internally to filter out entities
that don't have all properties in a given Order.
"""
def __init__(self, names):
super(_PropertyExistsFilter, self).__init__()
self._names = frozenset(names)
def _apply(self, value_map):
for name in self._names:
if not value_map.get(name):
return False
return True
def _get_prop_names(self):
return self._names
def _prune(self, _):
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class CorrelationFilter(FilterPredicate):
"""A filter that isolates correlated values and applies a sub-filter on them.
This filter assumes that every property used by the sub-filter should be
grouped before being passed to the sub-filter. The default grouping puts
each value in its own group. Consider:
e = {a: [1, 2], b: [2, 1, 3], c: 4}
A correlation filter with a sub-filter that operates on (a, b) will be tested
against the following 3 sets of values:
{a: 1, b: 2}
{a: 2, b: 1}
{b: 3}
In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but
CorrelationFilter('a = 2 AND b = 1') will. To apply an uncorrelated filter on
c, the filter must be applied in parallel to the correlation filter. For
example:
CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3'])
If 'c = 3' was included in the correlation filter, c would be grouped as well.
This would result in the following values:
{a: 1, b: 2, c: 3}
{a: 2, b: 1}
{b: 3}
If any set of correlated values match the sub-filter then the entity matches
the correlation filter.
"""
def __init__(self, subfilter):
"""Constructor.
Args:
subfilter: A FilterPredicate to apply to the correlated values
"""
self._subfilter = subfilter
@property
def subfilter(self):
return self._subfilter
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.subfilter)
def _apply(self, value_map):
base_map = dict((prop, []) for prop in self._get_prop_names())
value_maps = []
for prop in base_map:
grouped = self._group_values(prop, value_map[prop])
while len(value_maps) < len(grouped):
value_maps.append(base_map.copy())
for value, m in six.moves.zip(grouped, value_maps):
m[prop] = value
return self._apply_correlated(value_maps)
def _apply_correlated(self, value_maps):
"""Applies sub-filter to the correlated value maps.
The default implementation matches when any value_map in value_maps
matches the sub-filter.
Args:
value_maps: A list of correlated value_maps.
Returns:
      True if any of the correlated value maps matches the sub-filter.
"""
for map in value_maps:
if self._subfilter._apply(map):
return True
return False
def _group_values(self, prop, values):
"""A function that groups the given values.
Override this function to introduce custom grouping logic. The default
implementation assumes each value belongs in its own group.
Args:
      prop: The name of the property whose values are being grouped.
values: A list of opaque values.
Returns:
A list of lists of grouped values.
"""
return [[value] for value in values]
def _get_prop_names(self):
return self._subfilter._get_prop_names()
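# Illustrative sketch of CorrelationFilter, mirroring the docstring example
# above (CompositeFilter is defined below; property names are made up):
#
#   subfilter = CompositeFilter(CompositeFilter.AND,
#                               [make_filter('a', '=', 2),
#                                make_filter('b', '=', 1)])
#   correlated = CorrelationFilter(subfilter)
#   # correlated(entity) is True when some correlated group of 'a'/'b'
#   # values in the entity satisfies both equality filters.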
class CompositeFilter(FilterPredicate):
"""An immutable filter predicate that combines other predicates.
This class proactively merges sub-filters that are combined using the same
operator. For example:
CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6])
is equivalent to:
CompositeFilter(AND, [f1, f2, f3, f4, f5, f6])
Currently filters can only be combined using an AND operator.
"""
AND = 'and'
_OPERATORS = frozenset([AND])
def __init__(self, op, filters):
"""Constructor.
Args:
op: The operator to use to combine the given filters
filters: A list of one or more filters to combine
Raises:
      datastore_errors.BadArgumentError if op is not in CompositeFilter._OPERATORS
or filters is not a non-empty list containing only FilterPredicates.
"""
if not op in self._OPERATORS:
raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,))
if not filters or not isinstance(filters, (list, tuple)):
raise datastore_errors.BadArgumentError(
'filters argument should be a non-empty list (%r)' % (filters,))
super(CompositeFilter, self).__init__()
self._op = op
flattened = []
for f in filters:
if isinstance(f, CompositeFilter) and f._op == self._op:
flattened.extend(f._filters)
elif isinstance(f, FilterPredicate):
flattened.append(f)
else:
raise datastore_errors.BadArgumentError(
'filters argument must be a list of FilterPredicates, found (%r)' %
(f,))
if op == self.AND:
filters = flattened
flattened = []
ineq_map = {}
for f in filters:
if (isinstance(f, _PropertyRangeFilter) or
(isinstance(f, PropertyFilter) and f._has_inequality())):
name = f._get_prop_name()
index = ineq_map.get(name)
if index is not None:
range_filter = flattened[index]
flattened[index] = range_filter.intersect(f)
else:
if isinstance(f, PropertyFilter):
range_filter = _PropertyRangeFilter.from_property_filter(f)
else:
range_filter = f
ineq_map[name] = len(flattened)
flattened.append(range_filter)
else:
flattened.append(f)
self._filters = tuple(flattened)
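  # Illustrative sketch of the inequality merging performed above (property
  # name and bounds are made-up examples):
  #
  #   lower = make_filter('age', '>', 18)
  #   upper = make_filter('age', '<=', 65)
  #   merged = CompositeFilter(CompositeFilter.AND, [lower, upper])
  #   # merged.filters holds a single _PropertyRangeFilter equivalent to
  #   # 18 < age <= 65 instead of two separate PropertyFilters.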
@property
def op(self):
return self._op
@property
def filters(self):
return self._filters
def __repr__(self):
op = self.op
if op == self.AND:
op = 'AND'
else:
op = str(op)
return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters))
def _get_prop_names(self):
names = set()
for f in self._filters:
names |= f._get_prop_names()
return names
def _apply(self, value_map):
if self._op == self.AND:
for f in self._filters:
if not f._apply(value_map):
return False
return True
raise NotImplementedError
def _prune(self, value_map):
if self._op == self.AND:
matches = collections.defaultdict(set)
for f in self._filters:
props = f._get_prop_names()
local_value_map = dict((k, v) for k, v in value_map.items()
if k in props)
if not f._prune(local_value_map):
return False
for (prop, values) in local_value_map.items():
matches[prop].update(values)
for prop, value_set in matches.items():
value_map[prop] = sorted(value_set)
return True
raise NotImplementedError
def _to_pbs(self):
"""Returns the internal only pb representation."""
pbs = []
for f in self._filters:
pbs.extend(f._to_pbs())
return pbs
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.Filter.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
if not self._filters:
return None
if len(self._filters) == 1:
return self._filters[0]._to_pb_v1(adapter)
pb = googledatastore.Filter()
comp_pb = pb.composite_filter
if self.op == self.AND:
comp_pb.op = googledatastore.CompositeFilter.AND
else:
raise datastore_errors.BadArgumentError(
'Datastore V4 only supports CompositeFilter with AND operator.')
for f in self._filters:
comp_pb.filters.add().CopyFrom(f._to_pb_v1(adapter))
return pb
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeFilter, self).__eq__(other)
if len(self._filters) == 1:
result = self._filters[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._filters[0])
return result
return NotImplemented
class _IgnoreFilter(_SinglePropertyFilter):
"""A filter that removes all entities with the given keys."""
def __init__(self, key_value_set):
super(_IgnoreFilter, self).__init__()
self._keys = key_value_set
def _get_prop_name(self):
return datastore_types.KEY_SPECIAL_PROPERTY
def _apply_to_value(self, value):
return value not in self._keys
class _DedupingFilter(_IgnoreFilter):
"""A filter that removes duplicate keys."""
def __init__(self, key_value_set=None):
super(_DedupingFilter, self).__init__(key_value_set or set())
def _apply_to_value(self, value):
if super(_DedupingFilter, self)._apply_to_value(value):
self._keys.add(value)
return True
return False
class Order(_PropertyComponent):
"""A base class that represents a sort order on a query.
All sub-classes must be immutable as these are often stored without creating a
defensive copying.
This class can be used as either the cmp or key arg in sorted() or
list.sort(). To provide a stable ordering a trailing key ascending order is
always used.
"""
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
"""Constructs an order representing the reverse of the current order.
This function takes into account the effects of orders on properties not in
the group_by clause of a query. For example, consider:
SELECT A, First(B) ... GROUP BY A ORDER BY A, B
    Changing the order of B would affect which value is listed in the 'First(B)'
column which would actually change the results instead of just reversing
them.
Args:
group_by: If specified, only orders on properties in group_by will be
reversed.
Returns:
A new order representing the reverse direction.
"""
raise NotImplementedError
def _key(self, lhs_value_map):
"""Creates a key for the given value map."""
raise NotImplementedError
def _cmp(self, lhs_value_map, rhs_value_map):
"""Compares the given value maps."""
raise NotImplementedError
def _to_pb(self):
"""Internal only function to generate a filter pb."""
raise NotImplementedError
def _to_pb_v1(self, adapter):
"""Internal only function to generate a v1 filter pb.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
raise NotImplementedError
def key_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x: self.key(x, filter_predicate)
return self.key
def cmp_for_filter(self, filter_predicate):
if filter_predicate:
return lambda x, y: self.cmp(x, y, filter_predicate)
return self.cmp
def key(self, entity, filter_predicate=None):
"""Constructs a "key" value for the given entity based on the current order.
This function can be used as the key argument for list.sort() and sorted().
Args:
entity: The entity_pb2.EntityProto to convert
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
A key value that identifies the position of the entity when sorted by
the current order.
"""
names = self._get_prop_names()
names.add(datastore_types.KEY_SPECIAL_PROPERTY)
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
value_map = _make_key_value_map(entity, names)
if filter_predicate is not None:
filter_predicate._prune(value_map)
return (self._key(value_map),
value_map[datastore_types.KEY_SPECIAL_PROPERTY])
def cmp(self, lhs, rhs, filter_predicate=None):
"""Compares the given values taking into account any filters.
This function can be used as the cmp argument for list.sort() and sorted().
    This function is slightly more efficient than Order.key when comparing two
    entities; however, it is much less efficient when sorting a list of entities.
Args:
lhs: An entity_pb2.EntityProto
rhs: An entity_pb2.EntityProto
filter_predicate: A FilterPredicate used to prune values before comparing
entities or None.
Returns:
An integer <, = or > 0 representing the operator that goes in between lhs
      and rhs to create a true statement.
"""
names = self._get_prop_names()
if filter_predicate is not None:
names |= filter_predicate._get_prop_names()
lhs_value_map = _make_key_value_map(lhs, names)
rhs_value_map = _make_key_value_map(rhs, names)
if filter_predicate is not None:
filter_predicate._prune(lhs_value_map)
filter_predicate._prune(rhs_value_map)
result = self._cmp(lhs_value_map, rhs_value_map)
if result:
return result
if not lhs.HasField('key') and not rhs.HasField('key'):
return 0
lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(lhs.key))
rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or
datastore_types.ReferenceToKeyValue(rhs.key))
return cmp_compat.cmp(lhs_key, rhs_key)
@cmp_compat.total_ordering_from_cmp
class _ReverseOrder(_BaseComponent):
"""Reverses the comparison for the given object."""
def __init__(self, obj):
"""Constructor for _ReverseOrder.
Args:
obj: Any comparable and hashable object.
"""
super(_ReverseOrder, self).__init__()
self._obj = obj
def __hash__(self):
return hash(self._obj)
def __cmp__(self, other):
assert self.__class__ == other.__class__, (
'A datastore_query._ReverseOrder object can only be compared to '
'an object of the same type.')
return -cmp_compat.cmp(self._obj, other._obj)
class PropertyOrder(Order):
"""An immutable class that represents a sort order for a single property."""
ASCENDING = datastore_pb.Query.Order.ASCENDING
DESCENDING = datastore_pb.Query.Order.DESCENDING
_DIRECTIONS = frozenset([ASCENDING, DESCENDING])
def __init__(self, prop, direction=ASCENDING):
"""Constructor.
Args:
prop: the name of the prop by which to sort.
direction: the direction in which to sort the given prop.
Raises:
datastore_errors.BadArgumentError if the prop name or direction is
invalid.
"""
datastore_types.ValidateString(prop,
'prop',
datastore_errors.BadArgumentError)
if not direction in self._DIRECTIONS:
raise datastore_errors.BadArgumentError('unknown direction: %r' %
(direction,))
super(PropertyOrder, self).__init__()
self.__order = datastore_pb.Query.Order()
self.__order.property = six.ensure_binary(prop, 'utf-8')
self.__order.direction = direction
@property
def prop(self):
return self.__order.property
@property
def direction(self):
return self.__order.direction
def __repr__(self):
extra = ''
if self.direction == self.DESCENDING:
extra = ', DESCENDING'
name = repr(six.ensure_str(self.prop))[1:-1]
return '%s(<%s>%s)' % (self.__class__.__name__, name, extra)
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
if group_by and self.__order.property not in group_by:
return self
if self.__order.direction == self.ASCENDING:
return PropertyOrder(
six.ensure_text(self.__order.property), self.DESCENDING)
else:
return PropertyOrder(
six.ensure_text(self.__order.property), self.ASCENDING)
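  # Illustrative sketch of reversed() with a group_by clause (property names
  # are made-up examples):
  #
  #   order = PropertyOrder('b')
  #   order.reversed()                 # -> PropertyOrder on 'b', DESCENDING
  #   order.reversed(group_by=('a',))  # -> unchanged: 'b' is not grouped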
def _get_prop_names(self):
return set([self.__order.property])
def _key(self, lhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
if not lhs_values:
raise datastore_errors.BadArgumentError(
'Missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return min(lhs_values)
else:
return _ReverseOrder(max(lhs_values))
def _cmp(self, lhs_value_map, rhs_value_map):
lhs_values = lhs_value_map[self.__order.property]
rhs_values = rhs_value_map[self.__order.property]
if not lhs_values and not rhs_values:
return 0
if not lhs_values:
raise datastore_errors.BadArgumentError(
'LHS missing value for property (%s)' % self.__order.property)
if not rhs_values:
raise datastore_errors.BadArgumentError(
'RHS missing value for property (%s)' % self.__order.property)
if self.__order.direction == self.ASCENDING:
return cmp_compat.cmp(min(lhs_values), min(rhs_values))
else:
return cmp_compat.cmp(max(rhs_values), max(lhs_values))
@classmethod
def _from_pb(cls, order_pb):
self = cls.__new__(cls)
self.__order = order_pb
return self
def _to_pb(self):
"""Returns the internal only pb representation."""
return self.__order
def _to_pb_v1(self, adapter):
"""Returns a googledatastore.PropertyOrder representation of the order.
Args:
adapter: A datastore_rpc.AbstractAdapter.
"""
v1_order = googledatastore.PropertyOrder()
adapter.get_query_converter().v3_order_to_v1_order(self.__order, v1_order)
return v1_order
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.PropertyOrder is unsupported.')
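# Illustrative sketch (not part of the original module): constructing property
# orders. The 'price' property name is a made-up example.
#
#   by_price_desc = PropertyOrder('price', PropertyOrder.DESCENDING)
#   by_price_asc = by_price_desc.reversed()   # flips DESCENDING to ASCENDING
#   assert by_price_asc.direction == PropertyOrder.ASCENDING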
class CompositeOrder(Order):
"""An immutable class that represents a sequence of Orders.
This class proactively flattens sub-orders that are of type CompositeOrder.
For example:
CompositeOrder([O1, CompositeOrder([O2, O3]), O4])
is equivalent to:
CompositeOrder([O1, O2, O3, O4])
"""
def __init__(self, orders):
"""Constructor.
Args:
orders: A list of Orders which are applied in order.
"""
if not isinstance(orders, (list, tuple)):
raise datastore_errors.BadArgumentError(
'orders argument should be list or tuple (%r)' % (orders,))
super(CompositeOrder, self).__init__()
flattened = []
for order in orders:
if isinstance(order, CompositeOrder):
flattened.extend(order._orders)
elif isinstance(order, Order):
flattened.append(order)
else:
raise datastore_errors.BadArgumentError(
'orders argument should only contain Order (%r)' % (order,))
self._orders = tuple(flattened)
@property
def orders(self):
return self._orders
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.orders))
@datastore_rpc._positional(1)
def reversed(self, group_by=None):
return CompositeOrder([order.reversed(group_by=group_by)
for order in self._orders])
def _get_prop_names(self):
names = set()
for order in self._orders:
names |= order._get_prop_names()
return names
def _key(self, lhs_value_map):
result = []
for order in self._orders:
result.append(order._key(lhs_value_map))
return tuple(result)
def _cmp(self, lhs_value_map, rhs_value_map):
for order in self._orders:
result = order._cmp(lhs_value_map, rhs_value_map)
if result != 0:
return result
return 0
def size(self):
"""Returns the number of sub-orders the instance contains."""
return len(self._orders)
def _to_pbs(self):
"""Returns an ordered list of internal only pb representations."""
return [order._to_pb() for order in self._orders]
def _to_pb_v1(self, adapter):
"""Returns an ordered list of googledatastore.PropertyOrder.
Args:
adapter: A datastore_rpc.AbstractAdapter
"""
return [order._to_pb_v1(adapter) for order in self._orders]
def __eq__(self, other):
if self.__class__ is other.__class__:
return super(CompositeOrder, self).__eq__(other)
if len(self._orders) == 1:
result = self._orders[0].__eq__(other)
if result is NotImplemented and hasattr(other, '__eq__'):
return other.__eq__(self._orders[0])
return result
return NotImplemented
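# Illustrative sketch (not part of the original module): CompositeOrder
# flattens nested CompositeOrders, as its docstring describes. Property names
# are made up.
#
#   o1 = PropertyOrder('a')
#   o2 = PropertyOrder('b', PropertyOrder.DESCENDING)
#   o3 = PropertyOrder('c')
#   combined = CompositeOrder([o1, CompositeOrder([o2, o3])])
#   assert combined.size() == 3    # the nested order was flattened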
class FetchOptions(datastore_rpc.Configuration):
"""An immutable class that contains all options for fetching results.
These options apply to any request that pulls results from a query.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see option documentation below
for details.
"""
@datastore_rpc.ConfigOption
def produce_cursors(value):
"""If a Cursor should be returned with the fetched results.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'produce_cursors argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def offset(value):
"""The number of results to skip before returning the first result.
Only applies to the first request it is used with and is ignored if present
on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'offset',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def batch_size(value):
"""The number of results to attempt to retrieve in a batch.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'batch_size',
datastore_errors.BadArgumentError)
return value
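# Illustrative sketch (not part of the original module): FetchOptions are
# built by passing keyword arguments that correspond to the ConfigOption
# functions above, as the class docstring states. The specific values here
# are arbitrary examples.
#
#   opts = FetchOptions(offset=10, batch_size=100, produce_cursors=True)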
class QueryOptions(FetchOptions):
"""An immutable class that contains all options for running a query.
This class contains options that control execution process (deadline,
batch_size, read_policy, etc) and what part of the query results are returned
(keys_only, projection, offset, limit, etc). Options that control the contents
of the query results are specified on the datastore_query.Query directly.
This class reserves the right to define configuration options of any name
except those that start with 'user_'. External subclasses should only define
functions or variables with names that start with 'user_'.
Options are set by passing keyword arguments to the constructor corresponding
to the configuration options defined below and in FetchOptions and
datastore_rpc.Configuration.
This object can be used as the default config for a datastore_rpc.Connection
but in that case some options will be ignored, see below for details.
"""
ORDER_FIRST = datastore_pb.Query.ORDER_FIRST
ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST
FILTER_FIRST = datastore_pb.Query.FILTER_FIRST
_HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST])
@datastore_rpc.ConfigOption
def keys_only(value):
"""If the query should only return keys.
Raises:
datastore_errors.BadArgumentError if value is not a bool.
"""
if not isinstance(value, bool):
raise datastore_errors.BadArgumentError(
'keys_only argument should be bool (%r)' % (value,))
return value
@datastore_rpc.ConfigOption
def projection(value):
"""A list or tuple of property names to project.
If None, the entire entity is returned.
Specifying a projection:
- may change the index requirements for the given query;
- will cause a partial entity to be returned;
- will cause only entities that contain those properties to be returned;
Partial entities only contain the property name and value for properties
in the projection (meaning and multiple will not be set). They will also
only contain a single value for any multi-valued property. However, if a
multi-valued property is specified in the order, an inequality property, or
the projected properties, the entity will be returned multiple times, once
for each unique combination of values.
However, projection queries are significantly faster than normal queries.
Raises:
datastore_errors.BadArgumentError if value is empty or not a list or tuple
of strings.
"""
if isinstance(value, list):
value = tuple(value)
elif not isinstance(value, tuple):
raise datastore_errors.BadArgumentError(
'projection argument should be a list or tuple (%r)' % (value,))
if not value:
raise datastore_errors.BadArgumentError(
'projection argument cannot be empty')
for prop in value:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'projection argument should contain only strings (%r)' % (prop,))
return value
@datastore_rpc.ConfigOption
def limit(value):
"""Limit on the number of results to return.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is less
than zero.
"""
datastore_types.ValidateInteger(value,
'limit',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def prefetch_size(value):
"""Number of results to attempt to return on the initial request.
Raises:
datastore_errors.BadArgumentError if value is not an integer or is not
greater than zero.
"""
datastore_types.ValidateInteger(value,
'prefetch_size',
datastore_errors.BadArgumentError,
zero_ok=True)
return value
@datastore_rpc.ConfigOption
def start_cursor(value):
"""Cursor to use a start position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'start_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def end_cursor(value):
"""Cursor to use as an end position.
Ignored if present on datastore_rpc.Connection.config.
Raises:
datastore_errors.BadArgumentError if value is not a Cursor.
"""
if not isinstance(value, Cursor):
raise datastore_errors.BadArgumentError(
'end_cursor argument should be datastore_query.Cursor (%r)' %
(value,))
return value
@datastore_rpc.ConfigOption
def hint(value):
"""Hint on how the datastore should plan the query.
Raises:
datastore_errors.BadArgumentError if value is not a known hint.
"""
if value not in QueryOptions._HINTS:
raise datastore_errors.BadArgumentError('Unknown query hint (%r)' %
(value,))
return value
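# Illustrative sketch (not part of the original module): QueryOptions extends
# FetchOptions with query-level options, set via keyword arguments as the
# class docstring describes. Property names and values are made up.
#
#   keys_opts = QueryOptions(keys_only=True, limit=20)
#   proj_opts = QueryOptions(projection=('name', 'price'), batch_size=50)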
class Cursor(_BaseComponent):
"""An immutable class that represents a relative position in a query.
The position denoted by a Cursor is relative to a result in a query even
if the result has been removed from the given query. It is usually used to
point immediately after the last result returned by a batch.
A cursor should only be used on a query with an identical signature to the
one that produced it or on a query with its sort order reversed.
"""
@datastore_rpc._positional(1)
def __init__(self, urlsafe=None, _cursor_bytes=None):
"""Constructor.
A Cursor constructed with no arguments points to the first result of any
query. If such a Cursor is used as an end_cursor no results will ever be
returned.
"""
super(Cursor, self).__init__()
if urlsafe is not None:
if _cursor_bytes is not None:
raise datastore_errors.BadArgumentError(
'Can only specify one of urlsafe and _cursor_bytes')
_cursor_bytes = self._urlsafe_to_bytes(urlsafe)
if _cursor_bytes is not None:
self.__cursor_bytes = _cursor_bytes
else:
self.__cursor_bytes = six.binary_type()
def __repr__(self):
arg = six.ensure_str(self.to_websafe_string())
if arg:
arg = '<%s>' % arg
return '%s(%s)' % (self.__class__.__name__, arg)
def reversed(self):
"""DEPRECATED. It is no longer necessary to call reversed() on cursors.
A cursor returned by a query may also be used in a query whose sort order
has been reversed. This method returns a copy of the original cursor.
"""
return Cursor(_cursor_bytes=self.__cursor_bytes)
def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__cursor_bytes
@staticmethod
def from_bytes(cursor):
"""Gets a Cursor given its byte string serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_bytes.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument does not represent a
serialized cursor.
"""
return Cursor(_cursor_bytes=cursor)
def urlsafe(self):
"""Serialize cursor as a websafe string.
Returns:
A base64-encoded serialized cursor.
"""
return base64.urlsafe_b64encode(self.to_bytes())
to_websafe_string = urlsafe
@staticmethod
def from_websafe_string(cursor):
"""Gets a Cursor given its websafe serialized form.
The serialized form of a cursor may change in a non-backwards compatible
way. In this case cursors must be regenerated from a new Query request.
Args:
cursor: A serialized cursor as returned by .to_websafe_string.
Returns:
A Cursor.
Raises:
datastore_errors.BadValueError if the cursor argument is not a string
type or does not represent a serialized cursor.
"""
decoded_bytes = Cursor._urlsafe_to_bytes(cursor)
return Cursor.from_bytes(decoded_bytes)
@staticmethod
def _urlsafe_to_bytes(cursor):
if not isinstance(cursor, six.string_types + (six.binary_type,)):
raise datastore_errors.BadValueError(
'cursor argument should be str or unicode (%r)' % (cursor,))
try:
decoded_bytes = base64.urlsafe_b64decode(
six.ensure_binary(cursor, 'ascii'))
except (ValueError, TypeError) as e:
raise datastore_errors.BadValueError(
'Invalid cursor %s. Details: %s' % (cursor, e))
return decoded_bytes
def advance(self, offset, query, conn):
"""Advances a Cursor by the given offset.
Args:
offset: The amount to advance the current query.
query: A Query identical to the one this cursor was created from.
conn: The datastore_rpc.Connection to use.
Returns:
A new cursor that is advanced by offset using the given query.
"""
datastore_types.ValidateInteger(offset,
'offset',
datastore_errors.BadArgumentError)
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
query_options = QueryOptions(
start_cursor=self, offset=offset, limit=0, produce_cursors=True)
return query.run(conn, query_options).next_batch(
Batcher.AT_LEAST_OFFSET).cursor(0)
def __setstate__(self, state):
if '_Cursor__compiled_cursor' in state:
self.__cursor_bytes = state['_Cursor__compiled_cursor'].SerializeToString()
else:
self.__dict__ = state
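# Illustrative sketch (not part of the original module): cursors round-trip
# through their byte and websafe string forms using the methods defined above.
#
#   cursor = Cursor()                         # points before the first result
#   websafe = cursor.to_websafe_string()      # same as cursor.urlsafe()
#   restored = Cursor.from_websafe_string(websafe)
#   assert restored.to_bytes() == cursor.to_bytes()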
class _QueryKeyFilter(_BaseComponent):
"""A class that implements the key filters available on a Query."""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None):
"""Constructs a _QueryKeyFilter.
If app/namespace and ancestor are not defined, the app/namespace set in the
environment is used.
Args:
app: a string representing the required app id or None.
namespace: a string representing the required namespace or None.
kind: a string representing the required kind or None.
ancestor: a entity_pb2.Reference representing the required ancestor or
None.
Raises:
datastore_errors.BadArgumentError if app and ancestor.app() do not match or
an unexpected type is passed in for any argument.
"""
if kind is not None:
datastore_types.ValidateString(
kind, 'kind', datastore_errors.BadArgumentError)
if ancestor is not None:
if not isinstance(ancestor, entity_pb2.Reference):
raise datastore_errors.BadArgumentError(
'ancestor argument should be entity_pb2.Reference (%r)' %
(ancestor,))
ancestor_app = six.ensure_binary(ancestor.app)
if app is None:
app = ancestor_app
elif six.ensure_binary(app) != ancestor_app:
raise datastore_errors.BadArgumentError(
'ancestor argument should match app ("%r" != "%r")' %
(ancestor.app, app))
ancestor_namespace = six.ensure_binary(ancestor.name_space)
if namespace is None:
namespace = ancestor_namespace
elif six.ensure_binary(namespace) != ancestor_namespace:
raise datastore_errors.BadArgumentError(
'ancestor argument should match namespace ("%r" != "%r")' %
(six.ensure_binary(namespace), ancestor_namespace))
pb = entity_pb2.Reference()
pb.CopyFrom(ancestor)
ancestor = pb
self.__ancestor = ancestor
self.__path = list(ancestor.path.element)
else:
self.__ancestor = None
self.__path = None
super(_QueryKeyFilter, self).__init__()
self.__app = six.ensure_text(datastore_types.ResolveAppId(app), 'utf-8')
self.__namespace = (
six.ensure_text(datastore_types.ResolveNamespace(namespace), 'utf-8'))
self.__kind = kind
@property
def app(self):
return self.__app
@property
def namespace(self):
return self.__namespace
@property
def kind(self):
return self.__kind
@property
def ancestor(self):
return self.__ancestor
def __call__(self, entity_or_reference):
"""Apply the filter.
Accepts either an entity or a reference to avoid the need to extract keys
from entities when we have a list of entities (which is a common case).
Args:
entity_or_reference: Either an entity_pb2.EntityProto or
entity_pb2.Reference.
"""
if isinstance(entity_or_reference, entity_pb2.Reference):
key = entity_or_reference
elif isinstance(entity_or_reference, entity_pb2.EntityProto):
key = entity_or_reference.key
else:
raise datastore_errors.BadArgumentError(
'entity_or_reference argument must be an entity_pb2.EntityProto ' +
six.ensure_str('or entity_pb2.Reference (%r)' %
(entity_or_reference), 'utf-8'))
return (six.ensure_text(key.app, 'utf-8') == self.__app and
six.ensure_text(key.name_space, 'utf-8') == self.__namespace and
(not self.__kind or key.path.element[-1].type == self.__kind) and
(not self.__path or
key.path.element[0:len(self.__path)] == self.__path))
def _to_pb(self):
"""Returns an internal pb representation."""
pb = datastore_pb.Query()
pb.app = self.__app
datastore_types.SetNamespace(pb, self.__namespace)
if self.__kind is not None:
pb.kind = self.__kind
if self.__ancestor:
ancestor = pb.ancestor
ancestor.CopyFrom(self.__ancestor)
return pb
def _to_pb_v1(self, adapter):
"""Returns a v1 internal proto representation of the query key filter.
Args:
adapter: A datastore_rpc.AbstractAdapter.
Returns:
A tuple (googledatastore.RunQueryRequest, googledatastore.Filter).
The second tuple value is a Filter representing the ancestor portion of the
query. If there is no ancestor constraint, this value will be None.
"""
pb = googledatastore.RunQueryRequest()
partition_id = pb.partition_id
partition_id.project_id = (
adapter.get_entity_converter().app_to_project_id(self.__app))
if self.__namespace:
partition_id.namespace_id = self.__namespace
if self.__kind is not None:
pb.query.kind.add().name = self.__kind
ancestor_filter = None
if self.__ancestor:
ancestor_filter = googledatastore.Filter()
ancestor_prop_filter = ancestor_filter.property_filter
ancestor_prop_filter.op = (
googledatastore.PropertyFilter.HAS_ANCESTOR)
prop_pb = ancestor_prop_filter.property
prop_pb.name = datastore_types.KEY_SPECIAL_PROPERTY
adapter.get_entity_converter().v3_to_v1_key(
self.ancestor,
ancestor_prop_filter.value.key_value)
return pb, ancestor_filter
class _BaseQuery(_BaseComponent):
"""A base class for query implementations."""
def run(self, conn, query_options=None):
"""Runs the query using provided datastore_rpc.Connection.
Args:
conn: The datastore_rpc.Connection to use
query_options: Optional query options to use
Returns:
A Batcher that implicitly fetches query results asynchronously.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
return Batcher(query_options, self.run_async(conn, query_options))
def run_async(self, conn, query_options=None):
"""Runs the query using the provided datastore_rpc.Connection.
Args:
conn: the datastore_rpc.Connection on which to run the query.
query_options: Optional QueryOptions with which to run the query.
Returns:
An async object that can be used to grab the first Batch. Additional
batches can be retrieved by calling Batch.next_batch/next_batch_async.
Raises:
datastore_errors.BadArgumentError if any of the arguments are invalid.
"""
raise NotImplementedError
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of %r is unsupported.' % self)
class Query(_BaseQuery):
"""An immutable class that represents a query signature.
A query signature consists of a source of entities (specified as app,
namespace and optionally kind and ancestor) as well as a FilterPredicate,
grouping and a desired ordering.
"""
@datastore_rpc._positional(1)
def __init__(self, app=None, namespace=None, kind=None, ancestor=None,
filter_predicate=None, group_by=None, order=None,
read_time_us=None):
"""Constructor.
Args:
app: Optional app to query, derived from the environment if not specified.
namespace: Optional namespace to query, derived from the environment if
not specified.
kind: Optional kind to query.
ancestor: Optional ancestor to query, an entity_pb2.Reference.
filter_predicate: Optional FilterPredicate by which to restrict the query.
group_by: Optional list of properties to group the results by.
order: Optional Order in which to return results.
read_time_us: Optional timestamp to read the storage from. Internal use
only.
Raises:
datastore_errors.BadArgumentError if any argument is invalid.
"""
super(Query, self).__init__()
if filter_predicate is not None and not isinstance(filter_predicate,
FilterPredicate):
raise datastore_errors.BadArgumentError(
'filter_predicate should be datastore_query.FilterPredicate (%r)' %
(filter_predicate,))
if isinstance(order, CompositeOrder):
if order.size() == 0:
order = None
elif isinstance(order, Order):
order = CompositeOrder([order])
elif order is not None:
raise datastore_errors.BadArgumentError(
'order should be Order (%r)' % (order,))
if group_by is not None:
if isinstance(group_by, list):
group_by = tuple(group_by)
elif not isinstance(group_by, tuple):
raise datastore_errors.BadArgumentError(
'group_by argument should be a list or tuple (%r)' % (group_by,))
if not group_by:
raise datastore_errors.BadArgumentError(
'group_by argument cannot be empty')
for prop in group_by:
if not isinstance(prop, six.string_types + (six.binary_type,)):
raise datastore_errors.BadArgumentError(
'group_by argument should contain only strings (%r)' % (prop,))
self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind,
ancestor=ancestor)
self._order = order
self._filter_predicate = filter_predicate
self._group_by = group_by
self._read_time_us = read_time_us
@property
def app(self):
return self._key_filter.app
@property
def namespace(self):
return self._key_filter.namespace
@property
def kind(self):
return self._key_filter.kind
@property
def ancestor(self):
return self._key_filter.ancestor
@property
def filter_predicate(self):
return self._filter_predicate
@property
def order(self):
return self._order
@property
def group_by(self):
return self._group_by
@property
def read_time_us(self):
return self._read_time_us
def __repr__(self):
args = []
args.append('app=%r' % six.ensure_str(self.app))
ns = self.namespace
if ns:
args.append('namespace=%r' % six.ensure_str(ns))
kind = self.kind
if kind is not None:
args.append('kind=%r' % six.ensure_str(kind))
ancestor = self.ancestor
if ancestor is not None:
websafe = base64.urlsafe_b64encode(ancestor.SerializeToString())
args.append('ancestor=<%s>' % six.ensure_str(websafe))
filter_predicate = self.filter_predicate
if filter_predicate is not None:
args.append('filter_predicate=%r' % filter_predicate)
order = self.order
if order is not None:
args.append('order=%r' % order)
group_by = self.group_by
if group_by is not None:
args.append('group_by=%r' % (tuple(six.ensure_str(x) for x in group_by),))
read_time_us = self.read_time_us
if read_time_us is not None:
args.append('read_time_us=%r' % (read_time_us,))
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._to_pb_v1(conn, query_options)
else:
req = self._to_pb(conn, query_options)
return Batch.create_async(self, query_options, conn, req,
start_cursor=start_cursor)
@classmethod
def _from_pb(cls, query_pb):
kind = query_pb.HasField('kind') and query_pb.kind or None
ancestor = query_pb.HasField('ancestor') and query_pb.ancestor or None
filter_predicate = None
if query_pb.filter:
filter_predicate = CompositeFilter(
CompositeFilter.AND,
[PropertyFilter._from_pb(filter_pb) for filter_pb in query_pb.filter])
order = None
if query_pb.order:
order = CompositeOrder(
[PropertyOrder._from_pb(order_pb) for order_pb in query_pb.order])
group_by = None
if query_pb.group_by_property_name:
group_by = tuple(
six.ensure_text(name) for name in query_pb.group_by_property_name)
read_time_us = None
if query_pb.HasField('read_time_us'):
read_time_us = query_pb.read_time_us
return Query(
app=query_pb.app,
namespace=query_pb.name_space,
kind=kind,
ancestor=ancestor,
filter_predicate=filter_predicate,
order=order,
group_by=group_by,
read_time_us=read_time_us)
def _to_pb_v1(self, conn, query_options):
"""Returns a googledatastore.RunQueryRequest."""
v1_req, v1_ancestor_filter = self._key_filter._to_pb_v1(conn.adapter)
v1_query = v1_req.query
if self.filter_predicate:
filter_predicate_pb = self._filter_predicate._to_pb_v1(conn.adapter)
if self.filter_predicate and v1_ancestor_filter:
comp_filter_pb = v1_query.filter.composite_filter
comp_filter_pb.op = googledatastore.CompositeFilter.AND
comp_filter_pb.filters.add().CopyFrom(filter_predicate_pb)
comp_filter_pb.filters.add().CopyFrom(v1_ancestor_filter)
elif self.filter_predicate:
v1_query.filter.CopyFrom(filter_predicate_pb)
elif v1_ancestor_filter:
v1_query.filter.CopyFrom(v1_ancestor_filter)
if self._order:
for order in self._order._to_pb_v1(conn.adapter):
v1_query.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = datastore_pbs.PROPERTY_NAME_KEY
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
for prop in projection:
prop_ref_pb = v1_query.projection.add().property
prop_ref_pb.name = prop
if self._group_by:
for group_by in self._group_by:
v1_query.distinct_on.add().name = group_by
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
v1_query.limit.value = limit
count = QueryOptions.batch_size(query_options, conn.config)
if count is None:
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is not None:
pass
if query_options.offset:
v1_query.offset = query_options.offset
if query_options.start_cursor is not None:
v1_query.start_cursor = query_options.start_cursor.to_bytes()
if query_options.end_cursor is not None:
v1_query.end_cursor = query_options.end_cursor.to_bytes()
conn._set_request_read_policy(v1_req, query_options)
conn._set_request_transaction(v1_req)
return v1_req
def _to_pb(self, conn, query_options):
"""Returns the internal only pb representation."""
pb = self._key_filter._to_pb()
if self._filter_predicate:
for f in self._filter_predicate._to_pbs():
pb.filter.add().CopyFrom(f)
if self._order:
for order in self._order._to_pbs():
pb.order.add().CopyFrom(order)
if QueryOptions.keys_only(query_options, conn.config):
pb.keys_only = True
projection = QueryOptions.projection(query_options, conn.config)
self._validate_projection_and_group_by(projection, self._group_by)
if projection:
pb.property_name.extend(projection)
if self._group_by:
pb.group_by_property_name.extend(self._group_by)
if QueryOptions.produce_cursors(query_options, conn.config):
pb.compile = True
limit = QueryOptions.limit(query_options, conn.config)
if limit is not None:
pb.limit = limit
count = QueryOptions.prefetch_size(query_options, conn.config)
if count is None:
count = QueryOptions.batch_size(query_options, conn.config)
if count is not None:
pb.count = count
if query_options.offset:
pb.offset = query_options.offset
if query_options.start_cursor is not None:
try:
pb.compiled_cursor.ParseFromString(
query_options.start_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if query_options.end_cursor is not None:
try:
pb.end_compiled_cursor.ParseFromString(
query_options.end_cursor.to_bytes())
except message.DecodeError:
raise datastore_errors.BadValueError('invalid cursor')
if ((query_options.hint == QueryOptions.ORDER_FIRST and len(pb.order)) or
(query_options.hint == QueryOptions.ANCESTOR_FIRST and
pb.HasField('ancestor')) or
(query_options.hint == QueryOptions.FILTER_FIRST and pb.filter)):
pb.hint = query_options.hint
if self.read_time_us is not None:
pb.read_time_us = self.read_time_us
conn._set_request_read_policy(pb, query_options)
conn._set_request_transaction(pb)
return pb
def _validate_projection_and_group_by(self, projection, group_by):
"""Validates that a query's projection and group by match.
Args:
projection: A set of string property names in the projection.
group_by: A set of string property names in the group by.
Raises:
datastore_errors.BadRequestError: if the projection and group
by sets are not equal.
"""
if projection:
if group_by:
extra = set(projection) - set(group_by)
if extra:
raise datastore_errors.BadRequestError(
'projections includes properties not in the group_by argument: %s'
% extra)
elif group_by:
raise datastore_errors.BadRequestError(
'cannot specify group_by without a projection')
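# Illustrative sketch (not part of the original module): building a query
# signature and running it. The 'Product' kind, 'price' property and `conn`
# (an existing datastore_rpc connection) are assumptions.
#
#   query = Query(kind='Product',
#                 order=PropertyOrder('price', PropertyOrder.DESCENDING))
#   batcher = query.run(conn)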
def apply_query(query, entities, _key=None):
"""Performs the given query on a set of in-memory results.
This function can perform queries impossible in the datastore (e.g. a query
with multiple inequality filters on different properties) because all
operations are done in memory. For queries that can also be executed on the
datastore, the results produced by this function may not use the same
implicit ordering as the datastore. To ensure compatibility, explicit
ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__').
Order by __key__ should always be used when a consistent result is desired
(unless there is a sort order on another globally unique property).
Args:
query: a datastore_query.Query to apply
entities: a list of results, of arbitrary type, on which to apply the query.
_key: a function that takes an element of the result array as an argument
and must return an entity_pb2.EntityProto. If not specified, the
identity function is used (and entities must be a list of
entity_pb2.EntityProto).
Returns:
A subset of entities, filtered and ordered according to the query.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument must be a datastore_query.Query (%r)' % (query,))
if not isinstance(entities, list):
raise datastore_errors.BadArgumentError(
'entities argument must be a list (%r)' % (entities,))
key = _key or (lambda x: x)
filtered_results = [r for r in entities if query._key_filter(key(r))]
if not query._order:
if query._filter_predicate:
return [r for r in filtered_results if query._filter_predicate(key(r))]
return filtered_results
names = query._order._get_prop_names()
if query._filter_predicate:
names |= query._filter_predicate._get_prop_names()
exists_filter = _PropertyExistsFilter(names)
value_maps = []
for result in filtered_results:
value_map = _make_key_value_map(key(result), names)
if exists_filter._apply(value_map) and (
not query._filter_predicate or
query._filter_predicate._prune(value_map)):
value_map['__result__'] = result
value_maps.append(value_map)
value_maps.sort(key=functools.cmp_to_key(query._order._cmp))
return [value_map['__result__'] for value_map in value_maps]
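# Illustrative sketch (not part of the original module): applying a query to
# in-memory results. `entities` is an assumed list of entity_pb2.EntityProto
# objects; the kind and property name are made up.
#
#   query = Query(kind='Product', order=PropertyOrder('price'))
#   cheapest_first = apply_query(query, entities)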
class _AugmentedQuery(_BaseQuery):
"""A query that combines a datastore query with in-memory filters/results."""
@datastore_rpc._positional(2)
def __init__(self, query, in_memory_results=None, in_memory_filter=None,
max_filtered_count=None):
"""Constructor for _AugmentedQuery.
Do not call directly. Use the utility functions instead (e.g.
datastore_query.inject_results)
Args:
query: A datastore_query.Query object to augment.
in_memory_results: a list of pre-sorted and filtered results to add to the
stream of datastore results, or None.
in_memory_filter: a set of in-memory filters to apply to the datastore
results or None.
max_filtered_count: the maximum number of datastore entities that will be
filtered out by in_memory_filter if known.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
if (in_memory_filter is not None and
not isinstance(in_memory_filter, FilterPredicate)):
raise datastore_errors.BadArgumentError(
'in_memory_filter argument should be ' + six.ensure_str(
'datastore_query.FilterPredicate (%r)' %
(in_memory_filter,), 'utf-8'))
if (in_memory_results is not None and
not isinstance(in_memory_results, list)):
raise datastore_errors.BadArgumentError(
'in_memory_results argument should be a list of' +
six.ensure_str('entity_pb2.EntityProto (%r)' %
(in_memory_results,), 'utf-8'))
datastore_types.ValidateInteger(max_filtered_count,
'max_filtered_count',
empty_ok=True,
zero_ok=True)
self._query = query
self._max_filtered_count = max_filtered_count
self._in_memory_filter = in_memory_filter
self._in_memory_results = in_memory_results
@property
def app(self):
return self._query._key_filter.app
@property
def namespace(self):
return self._query._key_filter.namespace
@property
def kind(self):
return self._query._key_filter.kind
@property
def ancestor(self):
return self._query._key_filter.ancestor
@property
def filter_predicate(self):
return self._query._filter_predicate
@property
def order(self):
return self._query._order
@property
def group_by(self):
return self._query._group_by
def run_async(self, conn, query_options=None):
if not isinstance(conn, datastore_rpc.BaseConnection):
raise datastore_errors.BadArgumentError(
'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))
if not QueryOptions.is_configuration(query_options):
query_options = QueryOptions(config=query_options)
if self._query._order:
changes = {'keys_only': False}
else:
changes = {}
if self._in_memory_filter or self._in_memory_results:
in_memory_offset = query_options.offset
in_memory_limit = query_options.limit
if in_memory_limit is not None:
if self._in_memory_filter is None:
changes['limit'] = in_memory_limit
elif self._max_filtered_count is not None:
changes['limit'] = in_memory_limit + self._max_filtered_count
else:
changes['limit'] = None
if in_memory_offset:
changes['offset'] = None
if changes.get('limit', None) is not None:
changes['limit'] += in_memory_offset
else:
in_memory_offset = None
else:
in_memory_offset = None
in_memory_limit = None
modified_query_options = QueryOptions(config=query_options, **changes)
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
req = self._query._to_pb_v1(conn, modified_query_options)
else:
req = self._query._to_pb(conn, modified_query_options)
start_cursor = query_options.start_cursor
if not start_cursor and query_options.produce_cursors:
start_cursor = Cursor()
return _AugmentedBatch.create_async(self, modified_query_options, conn, req,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
@datastore_rpc._positional(1)
def inject_results(query, updated_entities=None, deleted_keys=None):
"""Creates a query object that will inject changes into results.
Args:
query: The datastore_query.Query to augment
updated_entities: A list of entity_pb2.EntityProto's that have been updated
and should take priority over any values returned by query.
deleted_keys: A list of entity_pb2.Reference's for entities that have been
deleted and should be removed from query results.
Returns:
A datastore_query._AugmentedQuery if in-memory filtering is required,
query otherwise.
"""
if not isinstance(query, Query):
raise datastore_errors.BadArgumentError(
'query argument should be datastore_query.Query (%r)' % (query,))
overridden_keys = set()
if deleted_keys is not None:
if not isinstance(deleted_keys, list):
raise datastore_errors.BadArgumentError(
'deleted_keys argument must be a list (%r)' % (deleted_keys,))
deleted_keys = list(six.moves.filter(query._key_filter, deleted_keys))
for key in deleted_keys:
overridden_keys.add(datastore_types.ReferenceToKeyValue(key))
if updated_entities is not None:
if not isinstance(updated_entities, list):
raise datastore_errors.BadArgumentError(
'updated_entities argument must be a list (%r)' % (updated_entities,))
updated_entities = list(
six.moves.filter(query._key_filter, updated_entities))
for entity in updated_entities:
overridden_keys.add(datastore_types.ReferenceToKeyValue(entity.key))
updated_entities = apply_query(query, updated_entities)
else:
updated_entities = []
if not overridden_keys:
return query
return _AugmentedQuery(query,
in_memory_filter=_IgnoreFilter(overridden_keys),
in_memory_results=updated_entities,
max_filtered_count=len(overridden_keys))
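# Illustrative sketch (not part of the original module): overlaying
# uncommitted changes on a query's results. `updated` (entity_pb2.EntityProto
# list), `deleted` (entity_pb2.Reference list) and `conn` are assumptions.
#
#   query = Query(kind='Product')
#   augmented = inject_results(query, updated_entities=updated,
#                              deleted_keys=deleted)
#   batcher = augmented.run(conn)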
class _BatchShared(object):
"""Data shared among the batches of a query."""
def __init__(self, query, query_options, conn,
augmented_query=None, initial_offset=None):
self.__query = query
self.__query_options = query_options
self.__conn = conn
self.__augmented_query = augmented_query
self.__was_first_result_processed = False
if initial_offset is None:
initial_offset = query_options.offset or 0
self.__expected_offset = initial_offset
self.__remaining_limit = query_options.limit
@property
def query(self):
return self.__query
@property
def query_options(self):
return self.__query_options
@property
def conn(self):
return self.__conn
@property
def augmented_query(self):
return self.__augmented_query
@property
def keys_only(self):
return self.__keys_only
@property
def compiled_query(self):
return self.__compiled_query
@property
def expected_offset(self):
return self.__expected_offset
@property
def remaining_limit(self):
return self.__remaining_limit
@property
def index_list(self):
"""Returns the list of indexes used by the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self.__index_list
def process_batch(self, batch):
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
skipped_results = batch.skipped_results
num_results = len(batch.entity_results)
else:
skipped_results = batch.skipped_results
num_results = len(batch.result)
self.__expected_offset -= skipped_results
if self.__remaining_limit is not None:
self.__remaining_limit -= num_results
if not self.__was_first_result_processed:
self.__was_first_result_processed = True
if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
result_type = batch.entity_result_type
self.__keys_only = result_type == googledatastore.EntityResult.KEY_ONLY
self.__compiled_query = None
self.__index_list = None
else:
self.__keys_only = batch.keys_only
if batch.HasField('compiled_query'):
self.__compiled_query = batch.compiled_query
else:
self.__compiled_query = None
try:
self.__index_list = [
self.__conn.adapter.pb_to_index(index_pb)
for index_pb in batch.index
]
except NotImplementedError:
self.__index_list = None
class Batch(object):
"""A batch of results returned by a query.
This class contains a batch of results returned from the datastore and
relevant metadata. This metadata includes:
query: The query that produced this batch
query_options: The QueryOptions used to run the query. This does not
contain any options passed to the .next_batch() call that created the
current batch.
start_cursor, end_cursor: These are the cursors that can be used
with a query to re-fetch this batch. They can also be used to
find all entities before or after the given batch (by use start_cursor as
an end cursor or vice versa). start_cursor can also be advanced to
point to a position within the batch using Cursor.advance().
skipped_results: the number of results skipped because of the offset
given to the request that generated it. This can be set either on
the original Query.run() request or in subsequent .next_batch() calls.
more_results: If this is true there are more results that can be retrieved
either by .next_batch() or Batcher.next().
This class is also able to fetch the next batch of the query using
.next_batch(). As batches of results must be fetched serially, .next_batch()
can only be called once. Additional calls to .next_batch() will return None.
When there are no more batches .next_batch() will return None as well. Note
that batches returned by iterating over Batcher will always return None for
.next_batch() as the Batcher handles fetching the next batch automatically.
A Batch typically represents the result of a single RPC request. The datastore
operates on a "best effort" basis so the batch returned by .next_batch()
or Query.run_async().get_result() may not have satisfied the requested offset
or number of results (specified through FetchOptions.offset and
FetchOptions.batch_size respectively). To satisfy these restrictions
additional batches may be needed (with FetchOptions that specify the remaining
offset or results needed). The Batcher class hides these limitations.
"""
__skipped_cursor = None
__end_cursor = None
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, query, query_options, conn, req,
start_cursor):
batch_shared = _BatchShared(query, query_options, conn)
batch0 = cls(batch_shared, start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared, start_cursor=Cursor()):
"""Constructor.
This class is constructed in stages (one when an RPC is sent and another
when an rpc is completed) and should not be constructed directly!!
Use Query.run_async().get_result() to create a Batch or Query.run()
to use a batcher.
This constructor does not perform verification.
Args:
batch_shared: Data shared between batches for a single query run.
start_cursor: Optional cursor pointing before this batch.
"""
self._batch_shared = batch_shared
self.__start_cursor = start_cursor
@property
def query_options(self):
"""The QueryOptions used to retrieve the first batch."""
return self._batch_shared.query_options
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.query
@property
def results(self):
"""A list of entities in this batch."""
return self.__results
@property
def keys_only(self):
"""Whether the entities in this batch only contain keys."""
return self._batch_shared.keys_only
@property
def index_list(self):
Returns the list of indexes used to perform this batch's query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._batch_shared.index_list
@property
def start_cursor(self):
"""A cursor that points to the position just before the current batch."""
return self.__start_cursor
@property
def end_cursor(self):
"""A cursor that points to the position just after the current batch."""
return self.__end_cursor
@property
def skipped_results(self):
"""The number of results skipped because of an offset in the request.
An offset is satisfied before any results are returned. The start_cursor
points to the position in the query before the skipped results.
"""
return self._skipped_results
@property
def more_results(self):
"""Whether more results can be retrieved from the query."""
return self.__more_results
def next_batch(self, fetch_options=None):
"""Synchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
A new Batch of results or None if either the next batch has already been
fetched or there are no more results.
"""
async_ = self.next_batch_async(fetch_options)
if async_ is None:
return None
return async_.get_result()
def _compiled_query(self):
return self._batch_shared.compiled_query
def cursor(self, index):
"""Gets the cursor that points just after the result at index - 1.
The index is relative to the first result in .results. Since start_cursor
points to the position before the first skipped result, the range of
indexes this function supports is limited to
[-skipped_results, len(results)].
For example, using start_cursor=batch.cursor(i) and
end_cursor=batch.cursor(j) will return the results found in
batch.results[i:j]. Note that any result added in the range (i-1, j]
will appear in the new query's results.
Warning: Any index in the range (-skipped_results, 0) may cause
continuation to miss or duplicate results if outside a transaction.
Args:
index: An int, the index relative to the first result before which the
cursor should point.
Returns:
A Cursor that points to a position just after the result index - 1,
which if used as a start_cursor will cause the first result to be
batch.results[index].
"""
if not isinstance(index, six.integer_types):
raise datastore_errors.BadArgumentError(
'index argument should be an integer (%r)' % (index,))
if not -self._skipped_results <= index <= len(self.__results):
raise datastore_errors.BadArgumentError(
'index argument must be in the inclusive range [%d, %d]' %
(-self._skipped_results, len(self.__results)))
if index == -self._skipped_results:
return self.__start_cursor
elif (index == 0 and
self.__skipped_cursor):
return self.__skipped_cursor
elif index > 0 and self.__result_cursors:
return self.__result_cursors[index - 1]
elif index == len(self.__results):
return self.__end_cursor
else:
return self.__start_cursor.advance(index + self._skipped_results,
self._batch_shared.query,
self._batch_shared.conn)
def next_batch_async(self, fetch_options=None):
"""Asynchronously get the next batch or None if there are no more batches.
Args:
fetch_options: Optional fetch options to use when fetching the next batch.
Merged with both the fetch options on the original call and the
connection.
Returns:
An async object that can be used to get the next Batch or None if either
the next batch has already been fetched or there are no more results.
"""
if not self.__datastore_cursor:
return None
fetch_options, next_batch = self._make_next_batch(fetch_options)
if (fetch_options is not None and
not FetchOptions.is_configuration(fetch_options)):
raise datastore_errors.BadArgumentError('Invalid fetch options.')
config = self._batch_shared.query_options.merge(fetch_options)
conn = next_batch._batch_shared.conn
requested_offset = 0
if fetch_options is not None and fetch_options.offset is not None:
requested_offset = fetch_options.offset
if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1:
if self._batch_shared.expected_offset != requested_offset:
raise datastore_errors.BadArgumentError(
'Cannot request the next batch with a different offset than '
' expected. Expected: %s, Got: %s.'
% (self._batch_shared.expected_offset, requested_offset))
limit = self._batch_shared.remaining_limit
next_options = QueryOptions(offset=self._batch_shared.expected_offset,
limit=limit,
start_cursor=self.__datastore_cursor)
config = config.merge(next_options)
result = next_batch._make_query_rpc_call(
config,
self._batch_shared.query._to_pb_v1(conn, config))
else:
result = next_batch._make_next_rpc_call(config,
self._to_pb(fetch_options))
self.__datastore_cursor = None
return result
def _to_pb(self, fetch_options=None):
req = datastore_pb.NextRequest()
if FetchOptions.produce_cursors(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config):
req.compile = True
count = FetchOptions.batch_size(fetch_options,
self._batch_shared.query_options,
self._batch_shared.conn.config)
if count is not None:
req.count = count
if fetch_options is not None and fetch_options.offset:
req.offset = fetch_options.offset
req.cursor.CopyFrom(self.__datastore_cursor)
return req
def _extend(self, next_batch):
"""Combines the current batch with the next one. Called by batcher."""
self.__datastore_cursor = next_batch.__datastore_cursor
next_batch.__datastore_cursor = None
self.__more_results = next_batch.__more_results
if not self.__results:
self.__skipped_cursor = next_batch.__skipped_cursor
self.__results.extend(next_batch.__results)
self.__result_cursors.extend(next_batch.__result_cursors)
self.__end_cursor = next_batch.__end_cursor
self._skipped_results += next_batch._skipped_results
def _make_query_rpc_call(self, config, req):
"""Makes a RunQuery call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
_api_version = self._batch_shared.conn._api_version
if _api_version == datastore_rpc._CLOUD_DATASTORE_V1:
return self._batch_shared.conn._make_rpc_call(
config, 'RunQuery', req, googledatastore.RunQueryResponse(),
self.__v1_run_query_response_hook)
return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
def _make_next_rpc_call(self, config, req):
"""Makes a Next call that will modify the instance.
Args:
config: The datastore_rpc.Configuration to use for the call.
req: The request to send with the call.
Returns:
A UserRPC object that can be used to fetch the result of the RPC.
"""
return self._batch_shared.conn._make_rpc_call(config, 'Next', req,
datastore_pb.QueryResult(),
self.__query_result_hook)
_need_index_header = 'The suggested index for this query is:'
def __v1_run_query_response_hook(self, rpc):
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError:
raise
batch = rpc.response.batch
self._batch_shared.process_batch(batch)
if batch.skipped_cursor:
self.__skipped_cursor = Cursor(_cursor_bytes=batch.skipped_cursor)
self.__result_cursors = [Cursor(_cursor_bytes=result.cursor)
for result in batch.entity_results
if result.cursor]
if batch.end_cursor:
self.__end_cursor = Cursor(_cursor_bytes=batch.end_cursor)
self._skipped_results = batch.skipped_results
if batch.more_results == googledatastore.QueryResultBatch.NOT_FINISHED:
self.__more_results = True
self.__datastore_cursor = self.__end_cursor or self.__skipped_cursor
if self.__datastore_cursor == self.__start_cursor:
raise datastore_errors.Timeout(
'The query was not able to make progress.')
else:
self._end()
self.__results = self._process_v1_results(batch.entity_results)
return self
def __query_result_hook(self, rpc):
"""Internal method used as get_result_hook for RunQuery/Next operation."""
try:
self._batch_shared.conn.check_rpc_success(rpc)
except datastore_errors.NeedIndexError as exc:
if isinstance(rpc.request, datastore_pb.Query):
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(
rpc.request)
props = datastore_index.GetRecommendedIndexProperties(props)
yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
xml = datastore_index.IndexXmlForQuery(kind, ancestor, props)
raise datastore_errors.NeedIndexError(
'\n'.join([str(exc), self._need_index_header, yaml]),
original_message=str(exc), header=self._need_index_header,
yaml_index=yaml, xml_index=xml)
raise
query_result = rpc.response
self._batch_shared.process_batch(query_result)
if query_result.HasField('skipped_results_compiled_cursor'):
self.__skipped_cursor = Cursor(
_cursor_bytes=query_result.skipped_results_compiled_cursor
.SerializeToString())
self.__result_cursors = [
Cursor(_cursor_bytes=result.SerializeToString())
for result in query_result.result_compiled_cursor
]
if query_result.HasField('compiled_cursor'):
self.__end_cursor = Cursor(
_cursor_bytes=query_result.compiled_cursor.SerializeToString())
self._skipped_results = query_result.skipped_results
if query_result.more_results:
self.__datastore_cursor = query_result.cursor
self.__more_results = True
else:
self._end()
self.__results = self._process_results(query_result.result)
return self
def _end(self):
"""Changes the internal state so that no more batches can be produced."""
self.__datastore_cursor = None
self.__more_results = False
def _make_next_batch(self, fetch_options):
"""Creates the object to store the next batch.
Args:
fetch_options: The datastore_query.FetchOptions passed in by the user or
None.
Returns:
A tuple containing the fetch options that should be used internally and
the object that should be used to contain the next batch.
"""
return fetch_options, Batch(self._batch_shared,
start_cursor=self.__end_cursor)
def _process_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of entity_pb2.EntityProto's returned by the datastore
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_to_query_result
return [converter(result, self._batch_shared.query_options)
for result in results]
def _process_v1_results(self, results):
"""Converts the datastore results into results returned to the user.
Args:
results: A list of googledatastore.EntityResults.
Returns:
A list of results that should be returned to the user.
"""
converter = self._batch_shared.conn.adapter.pb_v1_to_query_result
return [converter(result.entity, self._batch_shared.query_options)
for result in results]
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batch is unsupported.')
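# Illustrative sketch (not part of the original module): walking batches
# serially with Batch.next_batch(). `query`, `conn` and the process() callback
# are assumptions.
#
#   batch = query.run_async(conn, QueryOptions(batch_size=100)).get_result()
#   while batch is not None:
#       for result in batch.results:
#           process(result)
#       batch = batch.next_batch()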
class _AugmentedBatch(Batch):
"""A batch produced by a datastore_query._AugmentedQuery."""
@classmethod
@datastore_rpc._positional(5)
def create_async(cls, augmented_query, query_options, conn, req,
in_memory_offset, in_memory_limit, start_cursor):
initial_offset = 0 if in_memory_offset is not None else None
batch_shared = _BatchShared(augmented_query._query,
query_options,
conn,
augmented_query,
initial_offset=initial_offset)
batch0 = cls(batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=in_memory_limit,
start_cursor=start_cursor)
return batch0._make_query_rpc_call(query_options, req)
@datastore_rpc._positional(2)
def __init__(self, batch_shared,
in_memory_offset=None,
in_memory_limit=None,
next_index=0,
start_cursor=Cursor()):
"""A Constructor for datastore_query._AugmentedBatch.
Constructed by datastore_query._AugmentedQuery. Should not be called
directly.
"""
super(_AugmentedBatch, self).__init__(batch_shared,
start_cursor=start_cursor)
self.__in_memory_offset = in_memory_offset
self.__in_memory_limit = in_memory_limit
self.__next_index = next_index
@property
def query(self):
"""The query the current batch came from."""
return self._batch_shared.augmented_query
def cursor(self, index):
raise NotImplementedError
def _extend(self, next_batch):
super(_AugmentedBatch, self)._extend(next_batch)
self.__in_memory_limit = next_batch.__in_memory_limit
self.__in_memory_offset = next_batch.__in_memory_offset
self.__next_index = next_batch.__next_index
def _process_v1_results(self, results):
Process v1 results by converting to v3 and calling _process_results.
v3_results = []
is_projection = bool(self.query_options.projection)
for v1_result in results:
v3_entity = entity_pb2.EntityProto()
self._batch_shared.conn.adapter.get_entity_converter().v1_to_v3_entity(
v1_result.entity, v3_entity, is_projection)
v3_results.append(v3_entity)
return self._process_results(v3_results)
def _process_results(self, results):
in_memory_filter = self._batch_shared.augmented_query._in_memory_filter
if in_memory_filter:
results = list(filter(in_memory_filter, results))
in_memory_results = self._batch_shared.augmented_query._in_memory_results
if in_memory_results and self.__next_index < len(in_memory_results):
original_query = super(_AugmentedBatch, self).query
if original_query._order:
if results:
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i = 0
while i < len(results):
result = results[i]
result_key = original_query._order.key(result)
while next_key <= result_key:
results.insert(i, next_result)
i += 1
self.__next_index += 1
if self.__next_index >= len(in_memory_results):
break
next_result = in_memory_results[self.__next_index]
next_key = original_query._order.key(next_result)
i += 1
elif results or not super(_AugmentedBatch, self).more_results:
results = in_memory_results + results
self.__next_index = len(in_memory_results)
if self.__in_memory_offset:
assert not self._skipped_results
offset = min(self.__in_memory_offset, len(results))
if offset:
self._skipped_results += offset
self.__in_memory_offset -= offset
results = results[offset:]
if self.__in_memory_limit is not None:
results = results[:self.__in_memory_limit]
self.__in_memory_limit -= len(results)
if self.__in_memory_limit <= 0:
self._end()
return super(_AugmentedBatch, self)._process_results(results)
def _make_next_batch(self, fetch_options):
in_memory_offset = FetchOptions.offset(fetch_options)
augmented_query = self._batch_shared.augmented_query
if in_memory_offset and (augmented_query._in_memory_filter or
augmented_query._in_memory_results):
fetch_options = FetchOptions(offset=0)
else:
in_memory_offset = None
return (fetch_options,
_AugmentedBatch(self._batch_shared,
in_memory_offset=in_memory_offset,
in_memory_limit=self.__in_memory_limit,
start_cursor=self.end_cursor,
next_index=self.__next_index))
class Batcher(object):
"""A class that implements the Iterator interface for Batches.
Typically constructed by a call to Query.run().
The class hides the "best effort" nature of the datastore by potentially
making multiple requests to the datastore and merging the resulting batches.
This is accomplished efficiently by prefetching results and mixing both
non-blocking and blocking calls to the datastore as needed.
Iterating through batches is almost always more efficient than pulling all
results at once as RPC latency is hidden by asynchronously prefetching
results.
  The batches produced by this class cannot be used to fetch the next batch
  (through Batch.next_batch()) because, before the current batch is returned,
  the request for the next batch has already been sent.
"""
ASYNC_ONLY = None
AT_LEAST_OFFSET = 0
AT_LEAST_ONE = object()
def __init__(self, query_options, first_async_batch):
"""Constructor.
Although this class can be manually constructed, it is preferable to use
Query.run(query_options).
Args:
query_options: The QueryOptions used to create the first batch.
first_async_batch: The first batch produced by
Query.run_async(query_options).
"""
self.__next_batch = first_async_batch
self.__initial_offset = QueryOptions.offset(query_options) or 0
self.__skipped_results = 0
def next(self):
"""Get the next batch. See .next_batch()."""
return self.next_batch(self.AT_LEAST_ONE)
def __next__(self):
return self.next()
def next_batch(self, min_batch_size):
"""Get the next batch.
    The batch returned by this function cannot be used to fetch the next batch
    (through Batch.next_batch()); that call will always return None. To
    retrieve the next batch use .next() or .next_batch(N).
    This function may return a batch larger than min_batch_size, but will never
    return a smaller one unless there are no more results.
Special values can be used for min_batch_size:
    ASYNC_ONLY - Do not perform any synchronous fetches from the datastore
    even if this produces a batch with no results.
    AT_LEAST_OFFSET - Only pull enough results to satisfy the offset.
AT_LEAST_ONE - Pull batches until at least one result is returned.
Args:
min_batch_size: The minimum number of results to retrieve or one of
(ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE)
Returns:
The next Batch of results.
"""
if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET,
Batcher.AT_LEAST_ONE):
exact = False
else:
exact = True
datastore_types.ValidateInteger(min_batch_size,
'min_batch_size',
datastore_errors.BadArgumentError)
if not self.__next_batch:
raise StopIteration
batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += batch.skipped_results
if min_batch_size is not Batcher.ASYNC_ONLY:
if min_batch_size is Batcher.AT_LEAST_ONE:
min_batch_size = 1
needed_results = min_batch_size - len(batch.results)
while (batch.more_results and
(self.__skipped_results < self.__initial_offset or
needed_results > 0)):
if batch.query_options.batch_size:
batch_size = max(batch.query_options.batch_size, needed_results)
elif exact:
batch_size = needed_results
else:
batch_size = None
self.__next_batch = batch.next_batch_async(FetchOptions(
offset=max(0, self.__initial_offset - self.__skipped_results),
batch_size=batch_size))
next_batch = self.__next_batch.get_result()
self.__next_batch = None
self.__skipped_results += next_batch.skipped_results
needed_results = max(0, needed_results - len(next_batch.results))
batch._extend(next_batch)
self.__next_batch = batch.next_batch_async()
return batch
def __getstate__(self):
raise pickle.PicklingError(
'Pickling of datastore_query.Batcher is unsupported.')
def __iter__(self):
return self
class ResultsIterator(six.Iterator):
"""An iterator over the results from Batches obtained from a Batcher.
ResultsIterator implements Python's iterator protocol, so results can be
accessed with the for-statement:
> it = ResultsIterator(Query(kind='Person').run())
> for person in it:
> print 'Hi, %s!' % person['name']
At any time ResultsIterator.cursor() can be used to grab the Cursor that
points just after the last result returned by the iterator.
"""
__current_batch = None
__current_pos = 0
__last_cursor = None
def __init__(self, batcher):
"""Constructor.
Args:
batcher: A datastore_query.Batcher
"""
if not isinstance(batcher, Batcher):
raise datastore_errors.BadArgumentError(
'batcher argument should be datastore_query.Batcher (%r)' %
(batcher,))
self.__batcher = batcher
def index_list(self):
"""Returns the list of indexes used to perform the query.
Possibly None when the adapter does not implement pb_to_index.
"""
return self._ensure_current_batch().index_list
def cursor(self):
"""Returns a cursor that points just after the last result returned.
If next() throws an exception, this function returns the end_cursor from
the last successful batch or throws the same exception if no batch was
successful.
"""
return (self.__last_cursor or
self._ensure_current_batch().cursor(self.__current_pos))
def _ensure_current_batch(self):
if not self.__current_batch:
self.__current_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
self.__current_pos = 0
return self.__current_batch
def _compiled_query(self):
"""Returns the compiled query associated with the iterator.
    Internal only; do not use.
"""
return self._ensure_current_batch()._compiled_query()
def __next__(self):
"""Returns the next query result."""
while (not self.__current_batch or
self.__current_pos >= len(self.__current_batch.results)):
try:
next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET)
except:
if self.__current_batch:
self.__last_cursor = self.__current_batch.end_cursor
raise
self.__current_pos = 0
self.__current_batch = next_batch
result = self.__current_batch.results[self.__current_pos]
self.__current_pos += 1
return result
def __iter__(self):
return self
def next(self):
return self.__next__()
| [((2463, 1, 2463, 29), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(2463, 27, 2463, 28): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((118, 2, 118, 48), 'google.appengine.api.datastore_types.ValidateProperty', 'datastore_types.ValidateProperty', ({(118, 35, 118, 39): 'name', (118, 41, 118, 47): 'values'}, {}), '(name, values)', False, 'from google.appengine.api import datastore_types\n'), ((119, 15, 119, 57), 'google.appengine.api.datastore_types.ToPropertyPb', 'datastore_types.ToPropertyPb', ({(119, 44, 119, 48): 'name', (119, 50, 119, 56): 'values'}, {}), '(name, values)', False, 'from google.appengine.api import datastore_types\n'), ((152, 13, 152, 66), 'six.ensure_text', 'six.ensure_text', ({(152, 29, 152, 65): 'datastore_types.KEY_SPECIAL_PROPERTY'}, {}), '(datastore_types.KEY_SPECIAL_PROPERTY)', False, 'import six\n'), ((428, 3, 428, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(428, 29, 428, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1000, 3, 1000, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1000, 29, 1000, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1187, 3, 1187, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1187, 29, 1187, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1298, 3, 1298, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1298, 29, 1298, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1571, 3, 1571, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1571, 29, 1571, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1705, 3, 1705, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1705, 29, 1705, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((1904, 3, 1904, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(1904, 29, 1904, 30): '(1)'}, {}), '(1)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((2331, 3, 2331, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(2331, 29, 2331, 30): '(2)'}, {}), '(2)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((2646, 3, 2646, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(2646, 29, 2646, 30): '(5)'}, {}), '(5)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((2653, 3, 2653, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(2653, 29, 2653, 30): '(2)'}, {}), '(2)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((3054, 3, 3054, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(3054, 29, 3054, 30): '(5)'}, {}), '(5)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((3070, 3, 3070, 31), 'google.appengine.datastore.datastore_rpc._positional', 'datastore_rpc._positional', ({(3070, 29, 3070, 30): '(2)'}, {}), '(2)', False, 'from google.appengine.datastore import datastore_rpc\n'), ((146, 16, 146, 42), 
'six.ensure_text', 'six.ensure_text', ({(146, 32, 146, 41): 'prop.name'}, {}), '(prop.name)', False, 'import six\n'), ((334, 19, 334, 46), 'google.appengine.datastore.datastore_pb.Query.Filter', 'datastore_pb.Query.Filter', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((351, 12, 351, 48), 'google.appengine.api.datastore_types.FromPropertyPb', 'datastore_types.FromPropertyPb', ({(351, 43, 351, 47): 'prop'}, {}), '(prop)', False, 'from google.appengine.api import datastore_types\n'), ((392, 16, 392, 40), 'google.appengine.datastore.datastore_pbs.googledatastore.Filter', 'googledatastore.Filter', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((399, 10, 400, 69), 'pickle.PicklingError', 'pickle.PicklingError', ({(400, 8, 400, 68): '"""Pickling of datastore_query.PropertyFilter is unsupported."""'}, {}), "(\n 'Pickling of datastore_query.PropertyFilter is unsupported.')", False, 'import pickle\n'), ((605, 16, 605, 40), 'google.appengine.datastore.datastore_pbs.googledatastore.Filter', 'googledatastore.Filter', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((633, 10, 634, 48), 'pickle.PicklingError', 'pickle.PicklingError', ({(634, 8, 634, 47): "('Pickling of %r is unsupported.' % self)"}, {}), "('Pickling of %r is unsupported.' % self)", False, 'import pickle\n'), ((672, 10, 673, 48), 'pickle.PicklingError', 'pickle.PicklingError', ({(673, 8, 673, 47): "('Pickling of %r is unsupported.' % self)"}, {}), "('Pickling of %r is unsupported.' % self)", False, 'import pickle\n'), ((938, 9, 938, 33), 'google.appengine.datastore.datastore_pbs.googledatastore.Filter', 'googledatastore.Filter', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((1117, 11, 1117, 43), 'google.appengine.api.cmp_compat.cmp', 'cmp_compat.cmp', ({(1117, 26, 1117, 33): 'lhs_key', (1117, 35, 1117, 42): 'rhs_key'}, {}), '(lhs_key, rhs_key)', False, 'from google.appengine.api import cmp_compat\n'), ((1161, 4, 1163, 69), 'google.appengine.api.datastore_types.ValidateString', 'datastore_types.ValidateString', ({(1161, 35, 1161, 39): 'prop', (1162, 35, 1162, 41): '"""prop"""', (1163, 35, 1163, 68): 'datastore_errors.BadArgumentError'}, {}), "(prop, 'prop', datastore_errors.BadArgumentError)", False, 'from google.appengine.api import datastore_types\n'), ((1168, 19, 1168, 45), 'google.appengine.datastore.datastore_pb.Query.Order', 'datastore_pb.Query.Order', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((1169, 28, 1169, 60), 'six.ensure_binary', 'six.ensure_binary', ({(1169, 46, 1169, 50): 'prop', (1169, 52, 1169, 59): '"""utf-8"""'}, {}), "(prop, 'utf-8')", False, 'import six\n'), ((1250, 15, 1250, 46), 'google.appengine.datastore.datastore_pbs.googledatastore.PropertyOrder', 'googledatastore.PropertyOrder', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((1255, 10, 1256, 68), 'pickle.PicklingError', 'pickle.PicklingError', ({(1256, 8, 1256, 67): '"""Pickling of datastore_query.PropertyOrder is unsupported."""'}, {}), "(\n 'Pickling of datastore_query.PropertyOrder is unsupported.')", False, 'import pickle\n'), ((1392, 4, 1395, 49), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', (), '', False, 'from google.appengine.api import datastore_types\n'), ((1406, 4, 1408, 70), 
'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', ({(1406, 36, 1406, 41): 'value', (1407, 36, 1407, 48): '"""batch_size"""', (1408, 36, 1408, 69): 'datastore_errors.BadArgumentError'}, {}), "(value, 'batch_size', datastore_errors.\n BadArgumentError)", False, 'from google.appengine.api import datastore_types\n'), ((1497, 4, 1500, 49), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', (), '', False, 'from google.appengine.api import datastore_types\n'), ((1511, 4, 1514, 49), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', (), '', False, 'from google.appengine.api import datastore_types\n'), ((1682, 4, 1684, 70), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', ({(1682, 36, 1682, 42): 'offset', (1683, 36, 1683, 44): '"""offset"""', (1684, 36, 1684, 69): 'datastore_errors.BadArgumentError'}, {}), "(offset, 'offset', datastore_errors.\n BadArgumentError)", False, 'from google.appengine.api import datastore_types\n'), ((1813, 9, 1813, 29), 'google.appengine.datastore.datastore_pb.Query', 'datastore_pb.Query', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((1816, 4, 1816, 54), 'google.appengine.api.datastore_types.SetNamespace', 'datastore_types.SetNamespace', ({(1816, 33, 1816, 35): 'pb', (1816, 37, 1816, 53): 'self.__namespace'}, {}), '(pb, self.__namespace)', False, 'from google.appengine.api import datastore_types\n'), ((1835, 9, 1835, 42), 'google.appengine.datastore.datastore_pbs.googledatastore.RunQueryRequest', 'googledatastore.RunQueryRequest', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((1892, 10, 1893, 48), 'pickle.PicklingError', 'pickle.PicklingError', ({(1893, 8, 1893, 47): "('Pickling of %r is unsupported.' % self)"}, {}), "('Pickling of %r is unsupported.' 
% self)", False, 'import pickle\n'), ((2281, 10, 2282, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2282, 8, 2282, 72): "('query argument must be a datastore_query.Query (%r)' % (query,))"}, {}), "(\n 'query argument must be a datastore_query.Query (%r)' % (query,))", False, 'from google.appengine.api import datastore_errors\n'), ((2285, 10, 2286, 62), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2286, 8, 2286, 61): "('entities argument must be a list (%r)' % (entities,))"}, {}), "('entities argument must be a list (%r)' %\n (entities,))", False, 'from google.appengine.api import datastore_errors\n'), ((2363, 4, 2366, 49), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', (), '', False, 'from google.appengine.api import datastore_types\n'), ((2479, 10, 2480, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2480, 8, 2480, 72): "('query argument should be datastore_query.Query (%r)' % (query,))"}, {}), "(\n 'query argument should be datastore_query.Query (%r)' % (query,))", False, 'from google.appengine.api import datastore_errors\n'), ((2847, 10, 2847, 36), 'google.appengine.datastore.datastore_pb.NextRequest', 'datastore_pb.NextRequest', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((3046, 10, 3047, 60), 'pickle.PicklingError', 'pickle.PicklingError', ({(3047, 8, 3047, 59): '"""Pickling of datastore_query.Batch is unsupported."""'}, {}), "('Pickling of datastore_query.Batch is unsupported.')", False, 'import pickle\n'), ((3298, 10, 3299, 62), 'pickle.PicklingError', 'pickle.PicklingError', ({(3299, 8, 3299, 61): '"""Pickling of datastore_query.Batcher is unsupported."""'}, {}), "('Pickling of datastore_query.Batcher is unsupported.')", False, 'import pickle\n'), ((154, 27, 154, 74), 'google.appengine.api.datastore_types.ReferenceToKeyValue', 'datastore_types.ReferenceToKeyValue', ({(154, 63, 154, 73): 'entity.key'}, {}), '(entity.key)', False, 'from google.appengine.api import datastore_types\n'), ((328, 12, 328, 77), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(328, 46, 328, 76): "('unknown operator: %r' % (op,))"}, {}), "('unknown operator: %r' % (op,))", False, 'from google.appengine.api import datastore_errors\n'), ((330, 12, 331, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(331, 10, 331, 72): "('value argument should be entity_pb2.Property (%r)' % (value,))"}, {}), "(\n 'value argument should be entity_pb2.Property (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((365, 24, 366, 41), 'google.appengine.api.datastore_types.PropertyValueToKeyValue', 'datastore_types.PropertyValueToKeyValue', ({(366, 10, 366, 40): 'self._filter.property[0].value'}, {}), '(self._filter.property[0].value)', False, 'from google.appengine.api import datastore_types\n'), ((441, 12, 442, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(442, 10, 442, 72): "('start argument should be entity_pb2.Property (%r)' % (start,))"}, {}), "(\n 'start argument should be entity_pb2.Property (%r)' % (start,))", False, 'from google.appengine.api import datastore_errors\n'), ((444, 12, 445, 71), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', 
({(445, 10, 445, 70): "('start argument should be entity_pb2.Property (%r)' % (end,))"}, {}), "(\n 'start argument should be entity_pb2.Property (%r)' % (end,))", False, 'from google.appengine.api import datastore_errors\n'), ((447, 12, 449, 33), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(448, 10, 449, 32): "('start and end arguments must be on the same property (%s != %s)' % (start\n .name, end.name))"}, {}), "(\n 'start and end arguments must be on the same property (%s != %s)' % (\n start.name, end.name))", False, 'from google.appengine.api import datastore_errors\n'), ((451, 12, 452, 48), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(452, 10, 452, 47): '"""Unbounded ranges are not supported."""'}, {}), "('Unbounded ranges are not supported.')", False, 'from google.appengine.api import datastore_errors\n'), ((537, 30, 538, 28), 'google.appengine.api.datastore_types.PropertyValueToKeyValue', 'datastore_types.PropertyValueToKeyValue', ({(538, 10, 538, 27): 'self._start.value'}, {}), '(self._start.value)', False, 'from google.appengine.api import datastore_types\n'), ((543, 28, 544, 26), 'google.appengine.api.datastore_types.PropertyValueToKeyValue', 'datastore_types.PropertyValueToKeyValue', ({(544, 10, 544, 25): 'self._end.value'}, {}), '(self._end.value)', False, 'from google.appengine.api import datastore_types\n'), ((582, 11, 582, 38), 'google.appengine.datastore.datastore_pb.Query.Filter', 'datastore_pb.Query.Filter', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((592, 11, 592, 38), 'google.appengine.datastore.datastore_pb.Query.Filter', 'datastore_pb.Query.Filter', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((735, 22, 735, 56), 'six.moves.zip', 'six.moves.zip', ({(735, 36, 735, 43): 'grouped', (735, 45, 735, 55): 'value_maps'}, {}), '(grouped, value_maps)', False, 'import six\n'), ((803, 12, 803, 78), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(803, 46, 803, 77): "('unknown operator (%s)' % (op,))"}, {}), "('unknown operator (%s)' % (op,))", False, 'from google.appengine.api import datastore_errors\n'), ((805, 12, 806, 74), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(806, 10, 806, 73): "('filters argument should be a non-empty list (%r)' % (filters,))"}, {}), "(\n 'filters argument should be a non-empty list (%r)' % (filters,))", False, 'from google.appengine.api import datastore_errors\n'), ((897, 16, 897, 44), 'collections.defaultdict', 'collections.defaultdict', ({(897, 40, 897, 43): 'set'}, {}), '(set)', False, 'import collections\n'), ((943, 12, 944, 74), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(944, 10, 944, 73): '"""Datastore V4 only supports CompositeFilter with AND operator."""'}, {}), "(\n 'Datastore V4 only supports CompositeFilter with AND operator.')", False, 'from google.appengine.api import datastore_errors\n'), ((1113, 15, 1113, 59), 'google.appengine.api.datastore_types.ReferenceToKeyValue', 'datastore_types.ReferenceToKeyValue', ({(1113, 51, 1113, 58): 'lhs.key'}, {}), '(lhs.key)', False, 'from google.appengine.api import datastore_types\n'), ((1115, 15, 1115, 59), 'google.appengine.api.datastore_types.ReferenceToKeyValue', 'datastore_types.ReferenceToKeyValue', ({(1115, 51, 1115, 58): 'rhs.key'}, {}), '(rhs.key)', False, 
'from google.appengine.api import datastore_types\n'), ((1140, 12, 1140, 49), 'google.appengine.api.cmp_compat.cmp', 'cmp_compat.cmp', ({(1140, 27, 1140, 36): 'self._obj', (1140, 38, 1140, 48): 'other._obj'}, {}), '(self._obj, other._obj)', False, 'from google.appengine.api import cmp_compat\n'), ((1165, 12, 1166, 59), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1165, 46, 1166, 58): "('unknown direction: %r' % (direction,))"}, {}), "('unknown direction: %r' % (direction,))", False, 'from google.appengine.api import datastore_errors\n'), ((1205, 12, 1206, 68), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1206, 10, 1206, 67): "('Missing value for property (%s)' % self.__order.property)"}, {}), "('Missing value for property (%s)' % self.\n __order.property)", False, 'from google.appengine.api import datastore_errors\n'), ((1221, 12, 1222, 72), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1222, 10, 1222, 71): "('LHS missing value for property (%s)' % self.__order.property)"}, {}), "('LHS missing value for property (%s)' %\n self.__order.property)", False, 'from google.appengine.api import datastore_errors\n'), ((1225, 12, 1226, 72), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1226, 10, 1226, 71): "('RHS missing value for property (%s)' % self.__order.property)"}, {}), "('RHS missing value for property (%s)' %\n self.__order.property)", False, 'from google.appengine.api import datastore_errors\n'), ((1276, 12, 1277, 69), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1277, 10, 1277, 68): "('orders argument should be list or tuple (%r)' % (orders,))"}, {}), "(\n 'orders argument should be list or tuple (%r)' % (orders,))", False, 'from google.appengine.api import datastore_errors\n'), ((1377, 12, 1378, 68), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1378, 10, 1378, 67): "('produce_cursors argument should be bool (%r)' % (value,))"}, {}), "(\n 'produce_cursors argument should be bool (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1446, 12, 1447, 62), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1447, 10, 1447, 61): "('keys_only argument should be bool (%r)' % (value,))"}, {}), "('keys_only argument should be bool (%r)' %\n (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1480, 12, 1481, 48), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1481, 10, 1481, 47): '"""projection argument cannot be empty"""'}, {}), "('projection argument cannot be empty')", False, 'from google.appengine.api import datastore_errors\n'), ((1527, 12, 1529, 19), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1528, 10, 1529, 18): "('start_cursor argument should be datastore_query.Cursor (%r)' % (value,))"}, {}), "(\n 'start_cursor argument should be datastore_query.Cursor (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1542, 12, 1544, 19), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1543, 10, 1544, 18): "('end_cursor argument should be datastore_query.Cursor (%r)' % (value,))"}, {}), "(\n 'end_cursor 
argument should be datastore_query.Cursor (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1555, 12, 1556, 55), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1555, 46, 1556, 54): "('Unknown query hint (%r)' % (value,))"}, {}), "('Unknown query hint (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1588, 28, 1588, 45), 'six.binary_type', 'six.binary_type', ({}, {}), '()', False, 'import six\n'), ((1660, 12, 1661, 70), 'google.appengine.api.datastore_errors.BadValueError', 'datastore_errors.BadValueError', ({(1661, 10, 1661, 69): "('cursor argument should be str or unicode (%r)' % (cursor,))"}, {}), "(\n 'cursor argument should be str or unicode (%r)' % (cursor,))", False, 'from google.appengine.api import datastore_errors\n'), ((1686, 12, 1687, 75), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1687, 10, 1687, 74): "('query argument should be datastore_query.Query (%r)' % (query,))"}, {}), "(\n 'query argument should be datastore_query.Query (%r)' % (query,))", False, 'from google.appengine.api import datastore_errors\n'), ((1724, 6, 1725, 58), 'google.appengine.api.datastore_types.ValidateString', 'datastore_types.ValidateString', ({(1725, 10, 1725, 14): 'kind', (1725, 16, 1725, 22): '"""kind"""', (1725, 24, 1725, 57): 'datastore_errors.BadArgumentError'}, {}), "(kind, 'kind', datastore_errors.BadArgumentError)", False, 'from google.appengine.api import datastore_types\n'), ((1734, 21, 1734, 52), 'six.ensure_binary', 'six.ensure_binary', ({(1734, 39, 1734, 51): 'ancestor.app'}, {}), '(ancestor.app)', False, 'import six\n'), ((1743, 27, 1743, 65), 'six.ensure_binary', 'six.ensure_binary', ({(1743, 45, 1743, 64): 'ancestor.name_space'}, {}), '(ancestor.name_space)', False, 'import six\n'), ((1753, 11, 1753, 33), 'google.appengine.datastore.entity_bytes_pb2.Reference', 'entity_pb2.Reference', ({}, {}), '()', True, 'from google.appengine.datastore import entity_bytes_pb2 as entity_pb2\n'), ((1763, 33, 1763, 66), 'google.appengine.api.datastore_types.ResolveAppId', 'datastore_types.ResolveAppId', ({(1763, 62, 1763, 65): 'app'}, {}), '(app)', False, 'from google.appengine.api import datastore_types\n'), ((1765, 24, 1765, 67), 'google.appengine.api.datastore_types.ResolveNamespace', 'datastore_types.ResolveNamespace', ({(1765, 57, 1765, 66): 'namespace'}, {}), '(namespace)', False, 'from google.appengine.api import datastore_types\n'), ((1845, 24, 1845, 48), 'google.appengine.datastore.datastore_pbs.googledatastore.Filter', 'googledatastore.Filter', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((1930, 12, 1932, 30), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1931, 10, 1932, 29): "('filter_predicate should be datastore_query.FilterPredicate (%r)' % (\n filter_predicate,))"}, {}), "(\n 'filter_predicate should be datastore_query.FilterPredicate (%r)' % (\n filter_predicate,))", False, 'from google.appengine.api import datastore_errors\n'), ((2027, 12, 2028, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2028, 10, 2028, 72): "('conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))"}, {}), "(\n 'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))", False, 'from google.appengine.api import datastore_errors\n'), ((2324, 22, 2324, 61), 
'functools.cmp_to_key', 'functools.cmp_to_key', ({(2324, 43, 2324, 60): 'query._order._cmp'}, {}), '(query._order._cmp)', False, 'import functools\n'), ((2349, 12, 2350, 75), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2350, 10, 2350, 74): "('query argument should be datastore_query.Query (%r)' % (query,))"}, {}), "(\n 'query argument should be datastore_query.Query (%r)' % (query,))", False, 'from google.appengine.api import datastore_errors\n'), ((2402, 12, 2403, 73), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2403, 10, 2403, 72): "('conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))"}, {}), "(\n 'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,))", False, 'from google.appengine.api import datastore_errors\n'), ((2486, 12, 2487, 72), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2487, 10, 2487, 71): "('deleted_keys argument must be a list (%r)' % (deleted_keys,))"}, {}), "(\n 'deleted_keys argument must be a list (%r)' % (deleted_keys,))", False, 'from google.appengine.api import datastore_errors\n'), ((2488, 24, 2488, 73), 'six.moves.filter', 'six.moves.filter', ({(2488, 41, 2488, 58): 'query._key_filter', (2488, 60, 2488, 72): 'deleted_keys'}, {}), '(query._key_filter, deleted_keys)', False, 'import six\n'), ((2494, 12, 2495, 80), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2495, 10, 2495, 79): "('updated_entities argument must be a list (%r)' % (updated_entities,))"}, {}), "(\n 'updated_entities argument must be a list (%r)' % (updated_entities,))", False, 'from google.appengine.api import datastore_errors\n'), ((2499, 8, 2499, 61), 'six.moves.filter', 'six.moves.filter', ({(2499, 25, 2499, 42): 'query._key_filter', (2499, 44, 2499, 60): 'updated_entities'}, {}), '(query._key_filter, updated_entities)', False, 'import six\n'), ((2770, 12, 2771, 64), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2771, 10, 2771, 63): "('index argument should be an integer (%r)' % (index,))"}, {}), "(\n 'index argument should be an integer (%r)' % (index,))", False, 'from google.appengine.api import datastore_errors\n'), ((2815, 12, 2815, 71), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2815, 46, 2815, 70): '"""Invalid fetch options."""'}, {}), "('Invalid fetch options.')", False, 'from google.appengine.api import datastore_errors\n'), ((2897, 50, 2897, 76), 'google.appengine.datastore.datastore_pb.QueryResult', 'datastore_pb.QueryResult', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((2911, 50, 2911, 76), 'google.appengine.datastore.datastore_pb.QueryResult', 'datastore_pb.QueryResult', ({}, {}), '()', False, 'from google.appengine.datastore import datastore_pb\n'), ((3106, 18, 3106, 42), 'google.appengine.datastore.entity_bytes_pb2.EntityProto', 'entity_pb2.EntityProto', ({}, {}), '()', True, 'from google.appengine.datastore import entity_bytes_pb2 as entity_pb2\n'), ((3253, 6, 3255, 72), 'google.appengine.api.datastore_types.ValidateInteger', 'datastore_types.ValidateInteger', ({(3253, 38, 3253, 52): 'min_batch_size', (3254, 38, 3254, 54): '"""min_batch_size"""', (3255, 38, 3255, 71): 'datastore_errors.BadArgumentError'}, {}), "(min_batch_size, 'min_batch_size',\n datastore_errors.BadArgumentError)", False, 'from 
google.appengine.api import datastore_types\n'), ((3330, 12, 3332, 21), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(3331, 10, 3332, 20): "('batcher argument should be datastore_query.Batcher (%r)' % (batcher,))"}, {}), "(\n 'batcher argument should be datastore_query.Batcher (%r)' % (batcher,))", False, 'from google.appengine.api import datastore_errors\n'), ((142, 20, 142, 41), 'six.ensure_text', 'six.ensure_text', ({(142, 36, 142, 40): 'name'}, {}), '(name)', False, 'import six\n'), ((149, 10, 149, 61), 'google.appengine.api.datastore_types.PropertyValueToKeyValue', 'datastore_types.PropertyValueToKeyValue', ({(149, 50, 149, 60): 'prop.value'}, {}), '(prop.value)', False, 'from google.appengine.api import datastore_types\n'), ((354, 58, 355, 16), 'six.ensure_str', 'six.ensure_str', ({(355, 8, 355, 15): 'self.op'}, {}), '(self.op)', False, 'import six\n'), ((355, 18, 355, 38), 'six.ensure_str', 'six.ensure_str', ({(355, 33, 355, 37): 'name'}, {}), '(name)', False, 'import six\n'), ((480, 12, 481, 76), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(481, 10, 481, 75): "('other argument should be a _PropertyRangeFilter (%r)' % (other,))"}, {}), "(\n 'other argument should be a _PropertyRangeFilter (%r)' % (other,))", False, 'from google.appengine.api import datastore_errors\n'), ((1184, 16, 1184, 41), 'six.ensure_str', 'six.ensure_str', ({(1184, 31, 1184, 40): 'self.prop'}, {}), '(self.prop)', False, 'import six\n'), ((1194, 10, 1194, 48), 'six.ensure_text', 'six.ensure_text', ({(1194, 26, 1194, 47): 'self.__order.property'}, {}), '(self.__order.property)', False, 'import six\n'), ((1197, 10, 1197, 48), 'six.ensure_text', 'six.ensure_text', ({(1197, 26, 1197, 47): 'self.__order.property'}, {}), '(self.__order.property)', False, 'import six\n'), ((1477, 12, 1478, 74), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1478, 10, 1478, 73): "('projection argument should be a list or tuple (%r)' % (value,))"}, {}), "(\n 'projection argument should be a list or tuple (%r)' % (value,))", False, 'from google.appengine.api import datastore_errors\n'), ((1484, 14, 1485, 77), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1485, 12, 1485, 76): "('projection argument should contain only strings (%r)' % (prop,))"}, {}), "(\n 'projection argument should contain only strings (%r)' % (prop,))", False, 'from google.appengine.api import datastore_errors\n'), ((1582, 14, 1583, 64), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1583, 12, 1583, 63): '"""Can only specify one of urlsafe and _cursor_bytes"""'}, {}), "(\n 'Can only specify one of urlsafe and _cursor_bytes')", False, 'from google.appengine.api import datastore_errors\n'), ((1665, 10, 1665, 44), 'six.ensure_binary', 'six.ensure_binary', ({(1665, 28, 1665, 34): 'cursor', (1665, 36, 1665, 43): '"""ascii"""'}, {}), "(cursor, 'ascii')", False, 'import six\n'), ((1667, 12, 1668, 57), 'google.appengine.api.datastore_errors.BadValueError', 'datastore_errors.BadValueError', ({(1668, 10, 1668, 56): "('Invalid cursor %s. Details: %s' % (cursor, e))"}, {}), "('Invalid cursor %s. 
Details: %s' % (cursor, e))", False, 'from google.appengine.api import datastore_errors\n'), ((1730, 14, 1732, 24), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1731, 12, 1732, 23): "('ancestor argument should be entity_pb2.Reference (%r)' % (ancestor,))"}, {}), "(\n 'ancestor argument should be entity_pb2.Reference (%r)' % (ancestor,))", False, 'from google.appengine.api import datastore_errors\n'), ((1805, 12, 1805, 45), 'six.ensure_text', 'six.ensure_text', ({(1805, 28, 1805, 35): 'key.app', (1805, 37, 1805, 44): '"""utf-8"""'}, {}), "(key.app, 'utf-8')", False, 'import six\n'), ((1806, 12, 1806, 52), 'six.ensure_text', 'six.ensure_text', ({(1806, 28, 1806, 42): 'key.name_space', (1806, 44, 1806, 51): '"""utf-8"""'}, {}), "(key.name_space, 'utf-8')", False, 'import six\n'), ((1952, 14, 1953, 48), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1953, 12, 1953, 47): '"""group_by argument cannot be empty"""'}, {}), "('group_by argument cannot be empty')", False, 'from google.appengine.api import datastore_errors\n'), ((2000, 27, 2000, 51), 'six.ensure_str', 'six.ensure_str', ({(2000, 42, 2000, 50): 'self.app'}, {}), '(self.app)', False, 'import six\n'), ((2253, 12, 2254, 57), 'google.appengine.api.datastore_errors.BadRequestError', 'datastore_errors.BadRequestError', ({(2254, 10, 2254, 56): '"""cannot specify group_by without a projection"""'}, {}), "('cannot specify group_by without a projection'\n )", False, 'from google.appengine.api import datastore_errors\n'), ((2490, 26, 2490, 66), 'google.appengine.api.datastore_types.ReferenceToKeyValue', 'datastore_types.ReferenceToKeyValue', ({(2490, 62, 2490, 65): 'key'}, {}), '(key)', False, 'from google.appengine.api import datastore_types\n'), ((2501, 26, 2501, 73), 'google.appengine.api.datastore_types.ReferenceToKeyValue', 'datastore_types.ReferenceToKeyValue', ({(2501, 62, 2501, 72): 'entity.key'}, {}), '(entity.key)', False, 'from google.appengine.api import datastore_types\n'), ((2827, 14, 2830, 69), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(2828, 12, 2830, 68): "('Cannot request the next batch with a different offset than expected. Expected: %s, Got: %s.'\n % (self._batch_shared.expected_offset, requested_offset))"}, {}), "(\n 'Cannot request the next batch with a different offset than expected. 
Expected: %s, Got: %s.'\n % (self._batch_shared.expected_offset, requested_offset))", False, 'from google.appengine.api import datastore_errors\n'), ((2893, 35, 2893, 69), 'google.appengine.datastore.datastore_pbs.googledatastore.RunQueryResponse', 'googledatastore.RunQueryResponse', ({}, {}), '()', False, 'from google.appengine.datastore.datastore_pbs import googledatastore\n'), ((2944, 14, 2945, 55), 'google.appengine.api.datastore_errors.Timeout', 'datastore_errors.Timeout', ({(2945, 12, 2945, 54): '"""The query was not able to make progress."""'}, {}), "('The query was not able to make progress.')", False, 'from google.appengine.api import datastore_errors\n'), ((494, 19, 494, 70), 'google.appengine.api.cmp_compat.cmp', 'cmp_compat.cmp', ({(494, 34, 494, 51): 'other._start_incl', (494, 53, 494, 69): 'self._start_incl'}, {}), '(other._start_incl, self._start_incl)', False, 'from google.appengine.api import cmp_compat\n'), ((510, 19, 510, 66), 'google.appengine.api.cmp_compat.cmp', 'cmp_compat.cmp', ({(510, 34, 510, 48): 'self._end_incl', (510, 50, 510, 65): 'other._end_incl'}, {}), '(self._end_incl, other._end_incl)', False, 'from google.appengine.api import cmp_compat\n'), ((821, 14, 823, 17), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(822, 12, 823, 16): "('filters argument must be a list of FilterPredicates, found (%r)' % (f,))"}, {}), "(\n 'filters argument must be a list of FilterPredicates, found (%r)' % (f,))", False, 'from google.appengine.api import datastore_errors\n'), ((1287, 14, 1288, 72), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1288, 12, 1288, 71): "('orders argument should only contain Order (%r)' % (order,))"}, {}), "(\n 'orders argument should only contain Order (%r)' % (order,))", False, 'from google.appengine.api import datastore_errors\n'), ((1738, 11, 1738, 33), 'six.ensure_binary', 'six.ensure_binary', ({(1738, 29, 1738, 32): 'app'}, {}), '(app)', False, 'import six\n'), ((1739, 14, 1741, 32), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1740, 12, 1741, 31): '(\'ancestor argument should match app ("%r" != "%r")\' % (ancestor.app, app))'}, {}), '(\n \'ancestor argument should match app ("%r" != "%r")\' % (ancestor.app, app))', False, 'from google.appengine.api import datastore_errors\n'), ((1747, 11, 1747, 39), 'six.ensure_binary', 'six.ensure_binary', ({(1747, 29, 1747, 38): 'namespace'}, {}), '(namespace)', False, 'import six\n'), ((1941, 12, 1942, 50), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1942, 10, 1942, 49): "('order should be Order (%r)' % (order,))"}, {}), "('order should be Order (%r)' % (order,))", False, 'from google.appengine.api import datastore_errors\n'), ((1949, 14, 1950, 77), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1950, 12, 1950, 76): "('group_by argument should be a list or tuple (%r)' % (group_by,))"}, {}), "(\n 'group_by argument should be a list or tuple (%r)' % (group_by,))", False, 'from google.appengine.api import datastore_errors\n'), ((1956, 16, 1957, 77), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(1957, 14, 1957, 76): "('group_by argument should contain only strings (%r)' % (prop,))"}, {}), "(\n 'group_by argument should contain only strings (%r)' % (prop,))", False, 'from google.appengine.api import 
datastore_errors\n'), ((2003, 35, 2003, 53), 'six.ensure_str', 'six.ensure_str', ({(2003, 50, 2003, 52): 'ns'}, {}), '(ns)', False, 'import six\n'), ((2006, 30, 2006, 50), 'six.ensure_str', 'six.ensure_str', ({(2006, 45, 2006, 49): 'kind'}, {}), '(kind)', False, 'import six\n'), ((2010, 36, 2010, 59), 'six.ensure_str', 'six.ensure_str', ({(2010, 51, 2010, 58): 'websafe'}, {}), '(websafe)', False, 'import six\n'), ((2066, 10, 2066, 31), 'six.ensure_text', 'six.ensure_text', ({(2066, 26, 2066, 30): 'name'}, {}), '(name)', False, 'import six\n'), ((2209, 14, 2209, 62), 'google.appengine.api.datastore_errors.BadValueError', 'datastore_errors.BadValueError', ({(2209, 45, 2209, 61): '"""invalid cursor"""'}, {}), "('invalid cursor')", False, 'from google.appengine.api import datastore_errors\n'), ((2217, 14, 2217, 62), 'google.appengine.api.datastore_errors.BadValueError', 'datastore_errors.BadValueError', ({(2217, 45, 2217, 61): '"""invalid cursor"""'}, {}), "('invalid cursor')", False, 'from google.appengine.api import datastore_errors\n'), ((2249, 16, 2251, 22), 'google.appengine.api.datastore_errors.BadRequestError', 'datastore_errors.BadRequestError', ({(2250, 14, 2251, 21): "('projections includes properties not in the group_by argument: %s' % extra)"}, {}), "(\n 'projections includes properties not in the group_by argument: %s' % extra)", False, 'from google.appengine.api import datastore_errors\n'), ((2354, 51, 2356, 43), 'six.ensure_str', 'six.ensure_str', ({(2355, 14, 2356, 33): "('datastore_query.FilterPredicate (%r)' % (in_memory_filter,))", (2356, 35, 2356, 42): '"""utf-8"""'}, {}), "('datastore_query.FilterPredicate (%r)' % (in_memory_filter,),\n 'utf-8')", False, 'import six\n'), ((2361, 10, 2362, 55), 'six.ensure_str', 'six.ensure_str', ({(2361, 25, 2362, 45): "('datastore_pv.EntityProto (%r)' % (in_memory_results,))", (2362, 47, 2362, 54): '"""utf-8"""'}, {}), "('datastore_pv.EntityProto (%r)' % (in_memory_results,), 'utf-8')", False, 'import six\n'), ((2958, 35, 2959, 24), 'google.appengine.datastore.datastore_index.CompositeIndexForQuery', 'datastore_index.CompositeIndexForQuery', ({(2959, 12, 2959, 23): 'rpc.request'}, {}), '(rpc.request)', False, 'from google.appengine.datastore import datastore_index\n'), ((2961, 16, 2961, 68), 'google.appengine.datastore.datastore_index.GetRecommendedIndexProperties', 'datastore_index.GetRecommendedIndexProperties', ({(2961, 62, 2961, 67): 'props'}, {}), '(props)', False, 'from google.appengine.datastore import datastore_index\n'), ((2962, 15, 2962, 71), 'google.appengine.datastore.datastore_index.IndexYamlForQuery', 'datastore_index.IndexYamlForQuery', ({(2962, 49, 2962, 53): 'kind', (2962, 55, 2962, 63): 'ancestor', (2962, 65, 2962, 70): 'props'}, {}), '(kind, ancestor, props)', False, 'from google.appengine.datastore import datastore_index\n'), ((2963, 14, 2963, 69), 'google.appengine.datastore.datastore_index.IndexXmlForQuery', 'datastore_index.IndexXmlForQuery', ({(2963, 47, 2963, 51): 'kind', (2963, 53, 2963, 61): 'ancestor', (2963, 63, 2963, 68): 'props'}, {}), '(kind, ancestor, props)', False, 'from google.appengine.datastore import datastore_index\n'), ((472, 12, 473, 46), 'google.appengine.api.datastore_errors.BadArgumentError', 'datastore_errors.BadArgumentError', ({(473, 10, 473, 45): "('Unsupported operator (%s)' % (op,))"}, {}), "('Unsupported operator (%s)' % (op,))", False, 'from google.appengine.api import datastore_errors\n'), ((1802, 10, 1803, 56), 'six.ensure_str', 'six.ensure_str', ({(1802, 25, 1803, 46): "('or 
entity_pb2.Reference (%r)' % entity_or_reference)", (1803, 48, 1803, 55): '"""utf-8"""'}, {}), "('or entity_pb2.Reference (%r)' % entity_or_reference, 'utf-8')", False, 'import six\n'), ((1750, 13, 1750, 41), 'six.ensure_binary', 'six.ensure_binary', ({(1750, 31, 1750, 40): 'namespace'}, {}), '(namespace)', False, 'import six\n'), ((2019, 41, 2019, 58), 'six.ensure_str', 'six.ensure_str', ({(2019, 56, 2019, 57): 'x'}, {}), '(x)', False, 'import six\n')] |
Neklaustares-tPtwP/torchflare | tests/Metrics/test_recall.py | 7af6b01ef7c26f0277a041619081f6df4eb1e42c | # flake8: noqa
import warnings
import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import recall_score
from torchflare.metrics.recall_meter import Recall
from torchflare.metrics.meters import _BaseInputHandler
torch.manual_seed(42)
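# Each test below checks Recall against sklearn's recall_score twice: once on
# the full tensors, and once accumulated over mini-batches of 16.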
def test_binary_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 1)
targets = torch.randint(0, 2, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "binary"
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average="binary")
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=2, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=2, threshold=0.5, multilabel=False, average="micro")
def test_multiclass_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 4, size=(100,))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multiclass"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets.numpy(), np_outputs.numpy(), average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs])
rec_m = rc.value
assert rec_skm == pytest.approx(rec_m.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=False, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=False, average="micro")
def test_multilabel_inputs():
def _test(num_classes, threshold, multilabel, average):
rc = Recall(num_classes=num_classes, threshold=threshold, multilabel=multilabel, average=average,)
outputs = torch.randn(100, 4)
targets = torch.randint(0, 2, size=(100, 4))
bs = _BaseInputHandler(num_classes=num_classes, average=average, threshold=0.5, multilabel=multilabel,)
np_outputs, np_targets = bs._compute(outputs=outputs, targets=targets)
rc.accumulate(outputs=outputs, targets=targets)
rec_val = rc.value
assert rc.case_type == "multilabel"
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UndefinedMetricWarning)
rec_skm = recall_score(np_targets, np_outputs, average=average)
assert rec_skm == pytest.approx(rec_val.item())
rc.reset()
bs = 16
iters = targets.shape[0] // bs + 1
for i in range(iters):
idx = i * bs
rc.accumulate(
outputs=outputs[idx : idx + bs], targets=targets[idx : idx + bs],
)
m_rc = rc.value
assert rec_skm == pytest.approx(m_rc.item())
for _ in range(10):
_test(num_classes=4, threshold=0.5, multilabel=True, average="macro")
_test(num_classes=4, threshold=0.5, multilabel=True, average="micro")
| [((12, 0, 12, 21), 'torch.manual_seed', 'torch.manual_seed', ({(12, 18, 12, 20): '(42)'}, {}), '(42)', False, 'import torch\n'), ((18, 13, 18, 106), 'torchflare.metrics.recall_meter.Recall', 'Recall', (), '', False, 'from torchflare.metrics.recall_meter import Recall\n'), ((19, 18, 19, 37), 'torch.randn', 'torch.randn', ({(19, 30, 19, 33): '100', (19, 35, 19, 36): '1'}, {}), '(100, 1)', False, 'import torch\n'), ((20, 18, 20, 50), 'torch.randint', 'torch.randint', (), '', False, 'import torch\n'), ((22, 13, 22, 111), 'torchflare.metrics.meters._BaseInputHandler', '_BaseInputHandler', (), '', False, 'from torchflare.metrics.meters import _BaseInputHandler\n'), ((52, 13, 52, 106), 'torchflare.metrics.recall_meter.Recall', 'Recall', (), '', False, 'from torchflare.metrics.recall_meter import Recall\n'), ((54, 18, 54, 37), 'torch.randn', 'torch.randn', ({(54, 30, 54, 33): '100', (54, 35, 54, 36): '4'}, {}), '(100, 4)', False, 'import torch\n'), ((55, 18, 55, 50), 'torch.randint', 'torch.randint', (), '', False, 'import torch\n'), ((57, 13, 57, 111), 'torchflare.metrics.meters._BaseInputHandler', '_BaseInputHandler', (), '', False, 'from torchflare.metrics.meters import _BaseInputHandler\n'), ((91, 13, 91, 106), 'torchflare.metrics.recall_meter.Recall', 'Recall', (), '', False, 'from torchflare.metrics.recall_meter import Recall\n'), ((93, 18, 93, 37), 'torch.randn', 'torch.randn', ({(93, 30, 93, 33): '100', (93, 35, 93, 36): '4'}, {}), '(100, 4)', False, 'import torch\n'), ((94, 18, 94, 52), 'torch.randint', 'torch.randint', (), '', False, 'import torch\n'), ((96, 13, 96, 111), 'torchflare.metrics.meters._BaseInputHandler', '_BaseInputHandler', (), '', False, 'from torchflare.metrics.meters import _BaseInputHandler\n'), ((65, 13, 65, 38), 'warnings.catch_warnings', 'warnings.catch_warnings', ({}, {}), '()', False, 'import warnings\n'), ((66, 12, 66, 76), 'warnings.simplefilter', 'warnings.simplefilter', (), '', False, 'import warnings\n'), ((104, 13, 104, 38), 'warnings.catch_warnings', 'warnings.catch_warnings', ({}, {}), '()', False, 'import warnings\n'), ((105, 12, 105, 76), 'warnings.simplefilter', 'warnings.simplefilter', (), '', False, 'import warnings\n'), ((107, 22, 107, 75), 'sklearn.metrics.recall_score', 'recall_score', (), '', False, 'from sklearn.metrics import recall_score\n')] |
davesque/parsing.py | parsing/tests/test_utils.py | ff8b20e53b94e79571971ef23f0e5091e2786566 | from __future__ import unicode_literals
import unittest
from functools import reduce  # reduce is not a builtin on Python 3
from ..utils import compose, flatten, truncate, join, unary, equals
class TestEquals(unittest.TestCase):
def test_it_should_return_a_function_that_compares_against_x(self):
self.assertTrue(equals(234)(234))
self.assertFalse(equals(234)(123))
class TestUnary(unittest.TestCase):
def test_it_should_convert_a_function_into_a_unary_version_of_itself(self):
self.assertEqual(unary(lambda x, y: x + y)([1, 2]), 3)
class TestJoin(unittest.TestCase):
def test_it_should_join_a_sequence_into_a_string(self):
self.assertEqual(join(list('arst')), 'arst')
self.assertEqual(join(map(str, [1, 2, 3, 4])), '1234')
class TestTruncate(unittest.TestCase):
def test_it_should_truncate_a_string(self):
self.assertEqual(truncate('arst'), 'arst')
self.assertEqual(truncate('arstarstar'), 'arstarstar')
self.assertEqual(truncate('arstarstars'), 'arstarstar...')
self.assertEqual(truncate('arstarstarstarstarstarstarstarst'), 'arstarstar...')
class TestCompose(unittest.TestCase):
def test_it_should_compose_the_given_functions(self):
f = compose(
lambda x: x + 1,
lambda x: x * 2,
lambda x: x ** 3,
)
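        # The asserted values below imply that compose applies right-to-left:
        # f(x) == ((x ** 3) * 2) + 1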
self.assertEqual(f(1), 3)
self.assertEqual(f(2), 17)
self.assertEqual(f(3), 55)
class TestFlatten(unittest.TestCase):
def test_it_should_flatten_an_arbitrarily_nested_list(self):
self.assertEqual(
flatten([1, 2, [3, 4, [5, 6]]]),
[1, 2, 3, 4, 5, 6],
)
heavily_nested = reduce(lambda a, i: (a, i), range(1000))
self.assertEqual(
flatten(heavily_nested),
list(range(1000)),
)
| [] |
prash-kr-meena/GoogleR | Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | from Utils.Array import input_array
# Time : O(n^2)
# Space : O(1) Constant space
"""
I'll be having 2 pointers here:
one of them will move through the array looking for -ve numbers to operate on,
and the other will point to the correct location where I can put the -ve elements after I find them.
This same location also denotes the start of the 1st +ve number in the array,
    --> as those are the numbers we will be moving forward.
Finally, when you find a -ve number, store it temporarily,
do the shifting to move all the +ve numbers before it forward by one step, making room for the stored -ve number,
then put that number in its correct position and move the pointer ahead for future -ve numbers.
"""
def rearrange_via_modified_insertion_sort(A):
# walking_index = 0
index_to_place_nums = 0 # for placing -ve nums that i find
for walking_index in range(0, len(A)): # go through the array
if A[walking_index] >= 0: # +ve num, so move on
continue
# -ve num
found_num = A[walking_index] # temporary storage
# move all the +ve numbers, before it forward by one step
ptr = walking_index - 1
while ptr >= index_to_place_nums: # till it reaches the first +ve number
A[ptr + 1] = A[ptr]
ptr -= 1 # go back one step
# reached, now put the -ve found, at its correct position
A[index_to_place_nums] = found_num
index_to_place_nums += 1 # updating, for the index of next -ve number
if __name__ == "__main__":
arr = input_array()
rearrange_via_modified_insertion_sort(arr)
print(arr)
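    # e.g. for input "12 11 -13 -5 6 -7 5 -3 -6" the stable result is
    # [-13, -5, -7, -3, -6, 12, 11, 6, 5]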
"""
12 11 -13 -5 6 -7 5 -3 -6
-1 2 -3 4 5 6 -7 8 9
2 3 -1 -4 -6 # Reverse
4 3 2 1 0 -1 -2 -3 # Reverse containing 0
"""
| [((43, 10, 43, 23), 'Utils.Array.input_array', 'input_array', ({}, {}), '()', False, 'from Utils.Array import input_array\n')] |
zhtsh/test-examples | python_test/test_epoll/test_epoll.py | ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6 | # coding=utf8
import socket
import select
from datetime import datetime
from datetime import timedelta
EOL = b'\n\n'
response = b'HTTP/1.0 200 OK\nDate: Mon, 1 Jan 1996 01:01:01 GMT\n'
response += b'Content-Type: text/plain\nContent-Length: 14\n\n'  # 14 bytes: "Hello, world!" plus the trailing newline
response += b'Hello, world!\n'
# Create the server socket and bind it to the listening port
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)
# Create the epoll object and register the server socket for EPOLLIN (readable) events
epoll = select.epoll()
epoll.register(serversocket.fileno(), select.EPOLLIN)
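# Note: these registrations are level-triggered (epoll's default); OR in
# select.EPOLLET for edge-triggered behaviour.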
try:
connections = {}
requests = {}
responses = {}
while True:
        # Main loop: the epoll system call. poll() returns as soon as a network I/O event occurs; this is the key difference from the select system call
events = epoll.poll(1)
        # Handle each file descriptor reported by the event notification
for fileno, event in events:
            # The listening socket is readable: accept the connection and register it for readable events
if fileno == serversocket.fileno():
connection, address = serversocket.accept()
connection.setblocking(0)
epoll.register(connection.fileno(), select.EPOLLIN)
connections[connection.fileno()] = connection
requests[connection.fileno()] = b''
responses[connection.fileno()] = response
elif event & select.EPOLLIN:
                # The connection is readable: collect what the client sent and, once a full request has arrived, also register the connection as writable
try:
requests[fileno] += connections[fileno].recv(1024)
if EOL in requests[fileno]:
epoll.modify(fileno, event | select.EPOLLOUT)
print(requests[fileno])
except Exception as e:
print(e)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLOUT:
                # The connection is writable: send the response data to the client
try:
byteswritten = connections[fileno].send(responses[fileno])
# responses[fileno] = responses[fileno][byteswritten:]
# if len(responses[fileno]) == 0:
# epoll.modify(fileno, 0)
# connections[fileno].shutdown(socket.SHUT_RDWR)
except Exception as e:
print(e)
# epoll.modify(fileno, 0)
epoll.unregister(fileno)
del connections[fileno]
elif event & select.EPOLLHUP:
epoll.unregister(fileno)
connections[fileno].close()
del connections[fileno]
finally:
epoll.unregister(serversocket.fileno())
epoll.close()
serversocket.close()
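    # Added note: to poke this server by hand, something like
    #   printf 'GET / HTTP/1.0\n\n' | nc 127.0.0.1 8080
    # should work, since the doubled \n is what matches the EOL marker the loop waits for
    # before registering the connection for EPOLLOUT and sending the canned response.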
| [((14, 15, 14, 64), 'socket.socket', 'socket.socket', ({(14, 29, 14, 43): 'socket.AF_INET', (14, 45, 14, 63): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n'), ((21, 8, 21, 22), 'select.epoll', 'select.epoll', ({}, {}), '()', False, 'import select\n')] |
Kehvarl/AdventOfCode2019 | 20.2-Donut/Donut2.py | f72cfeefdfbde365bc9a5b722d5875d556379cf2 | import collections
from pprint import pprint
example1 = open("input.txt", "r").read()
# grid = [[val for val in line] for line in example1.split("\n")]
grid = example1.split("\n")
length = 0
for line in grid:
length = max(len(line), length)
out = []
for line in grid:
out.append(line[::-1].zfill(length)[::-1])
grid = out
scanned = []
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def find_dot(dot_x, dot_y):
for (_dx, _dy) in neighbors:
if 0 <= dot_x + _dx < len(grid[0]) and 0 <= dot_y + _dy < len(grid):
if grid[dot_y + _dy][dot_x + _dx] == ".":
return (dot_x + _dx, dot_y + _dy), (dot_x - _dx, dot_y - _dy) # (dot), (tag)
return False
# Find portals
# For each portal:
# Inner edge: recurse
# Outer edge: return
portals = {}
portal_links = {}
height = len(grid) - 1
width = len(grid[0]) - 1
for y in range(len(grid)):
for x in range(len(grid[0])):
if grid[y][x].isalpha():
portal = find_dot(x, y)
if portal:
dot, (tag_x, tag_y) = portal
dot_x, dot_y = dot
edge = dot_x == 2 or dot_x == width - 2 or dot_y == 2 or dot_y == height - 2
tag = "".join(sorted(grid[y][x] + grid[tag_y][tag_x]))
if not portals.get(tag):
portals[tag] = []
portals[tag].append(((x, y), dot, edge))
gx, gy, sx, sy = (0, 0, 0, 0)
for link in portals:
ends = portals[link]
if len(ends) == 2:
(a, (a_x, a_y), a_edge), (b, (b_x, b_y), b_edge) = ends
portal_links[a] = (b_x, b_y, a_edge, link)
portal_links[b] = (a_x, a_y, b_edge, link)
elif link == "ZZ":
goal, (gx, gy), ge = ends[0]
elif link == "AA":
start, (sx, sy), se = ends[0]
pprint(portals)
print(portal_links)
bfs = collections.deque([((sx, sy), 0, 0)])
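# Added note: each BFS state is (position, recursion level, distance). Stepping through an
# outer-edge portal goes up one level (level - 1, only allowed when level > 0), an inner-edge
# portal goes down one level (level + 1), and reaching ZZ only ends the search on level 0.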
seen = {(sx, sy, 0)}
running = True
while running:
pos, level, dist = bfs.popleft()
if pos == (gx, gy) and level == 0:
print(dist)
running = False
break
for neighbor in neighbors:
dx, dy = neighbor
tx, ty = pos
tx, ty = tx + dx, ty + dy
t_level = level
if (tx, ty) in portal_links:
px, py, p_edge, link = portal_links[(tx, ty)]
# print(link, (tx, ty), (px, py), p_edge)
if p_edge and t_level > 0:
t_level -= 1
tx, ty = px, py
elif not p_edge:
t_level += 1
tx, ty = px, py
if (tx, ty, t_level) in seen:
continue
seen.add((tx, ty, t_level))
if grid[ty][tx] == '.':
p = (tx, ty)
s = (p, t_level, dist + 1)
bfs.append(s)
print("complete")
| [((68, 0, 68, 15), 'pprint.pprint', 'pprint', ({(68, 7, 68, 14): 'portals'}, {}), '(portals)', False, 'from pprint import pprint\n'), ((71, 6, 71, 43), 'collections.deque', 'collections.deque', ({(71, 24, 71, 42): '[((sx, sy), 0, 0)]'}, {}), '([((sx, sy), 0, 0)])', False, 'import collections\n')] |
idaks/OpenRefine-Provenance-Tools | OR_Client_Library/openrefine_client/tests/test_history.py | cc469c3eb8e56c8b0f4616cc501546db3c4176ea | #!/usr/bin/env python
"""
test_history.py
"""
# Copyright (c) 2011 Paul Makepeace, Real Programmers. All rights reserved.
import unittest
from OR_Client_Library.openrefine_client.google.refine.history import *
class HistoryTest(unittest.TestCase):
def test_init(self):
response = {
u"code": "ok",
u"historyEntry": {
u"id": 1303851435223,
u"description": "Split 4 cells",
u"time": "2011-04-26T16:45:08Z"
}
}
he = response['historyEntry']
entry = HistoryEntry(he['id'], he['time'], he['description'])
self.assertEqual(entry.id, 1303851435223)
self.assertEqual(entry.description, 'Split 4 cells')
self.assertEqual(entry.time, '2011-04-26T16:45:08Z')
if __name__ == '__main__':
unittest.main()
| [((31, 4, 31, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n')] |
Remmeauth/remme-core-cli | tests/batch/test_get_batch.py | 94cc09fe9d2e718b45273dde68d6c672c4773f6a | """
Provide tests for command line interface's get batch command.
"""
import json
import pytest
from click.testing import CliRunner
from cli.constants import (
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = 'ccb529856e538325b435c6a75261702d1bdb52d3873b29189a722330cda628a6' \
'62028a7b39d1f5475cb78f5fc12efb986a35553ce8f1b63580b97fc6ab9e9655'
def test_get_batch():
"""
Case: get a batch by identifier.
Expect: batch is returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert isinstance(json.loads(result.output), dict)
def test_get_batch_with_invalid_id():
"""
Case: get a batch by its invalid identifier.
Expect: the following identifier is invalid error message.
"""
invalid_batch_id = 'abcefg'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
invalid_batch_id,
'--node-url',
DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING,
])
expected_error_message = {
'errors': {
'id': [
f'The following identifier `{invalid_batch_id}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
def test_get_batch_without_node_url(mocker):
"""
Case: get a batch by its identifier without passing node URL.
Expect: batch is returned from a node on localhost.
"""
batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \
'085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751'
expected_result = {
"data": {
"header": {
"signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
"transaction_ids": [
"5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
],
},
"header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e"
"232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977",
"transactions": [
{
"header": {
"batcher_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
"family_name": "sawtooth_settings",
"family_version": "1.0",
"inputs": [
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b",
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7",
],
"outputs": [
"000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b",
],
"signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f",
},
"header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c"
"1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7",
"payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm"
"VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==",
},
],
},
}
mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete')
mock_get_batch_by_id.return_value = expected_result
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
batch_id,
])
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_result.get('data') == json.loads(result.output).get('result')
def test_get_batch_with_invalid_node_url():
"""
Case: get a batch by its identifier by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
invalid_node_url = 'my-node-url.com'
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
invalid_node_url,
])
expected_error_message = {
'errors': f'Please check if your node running at http://{invalid_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error_message) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_batch_node_url_with_protocol(node_url_with_protocol):
"""
Case: get a batch by its identifier by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'batch',
'get',
'--id',
BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE,
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
| [((152, 1, 152, 103), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(152, 25, 152, 49): '"""node_url_with_protocol"""', (152, 51, 152, 102): "['http://masternode.com', 'https://masternode.com']"}, {}), "('node_url_with_protocol', ['http://masternode.com',\n 'https://masternode.com'])", False, 'import pytest\n'), ((26, 13, 26, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((47, 13, 47, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((115, 13, 115, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((134, 13, 134, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((158, 13, 158, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((37, 22, 37, 47), 'json.loads', 'json.loads', ({(37, 33, 37, 46): 'result.output'}, {}), '(result.output)', False, 'import json\n'), ((66, 11, 66, 54), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', ({(66, 31, 66, 53): 'expected_error_message'}, {}), '(expected_error_message)', False, 'from cli.utils import dict_to_pretty_json\n'), ((149, 11, 149, 54), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', ({(149, 31, 149, 53): 'expected_error_message'}, {}), '(expected_error_message)', False, 'from cli.utils import dict_to_pretty_json\n'), ((177, 11, 177, 46), 'cli.utils.dict_to_pretty_json', 'dict_to_pretty_json', ({(177, 31, 177, 45): 'expected_error'}, {}), '(expected_error)', False, 'from cli.utils import dict_to_pretty_json\n'), ((124, 42, 124, 67), 'json.loads', 'json.loads', ({(124, 53, 124, 66): 'result.output'}, {}), '(result.output)', False, 'import json\n')] |
pbielak/graph-barlow-twins | experiments/scripts/preprocess_dataset.py | f8e20134afed4f17ffcecf8f48764df362ffdcad | import sys
from gssl.datasets import load_dataset
from gssl.inductive.datasets import load_ppi
from gssl.utils import seed
def main():
seed()
# Read dataset name
dataset_name = sys.argv[1]
# Load dataset
if dataset_name == "PPI":
load_ppi()
else:
load_dataset(name=dataset_name)
if __name__ == "__main__":
main()
| [((9, 4, 9, 10), 'gssl.utils.seed', 'seed', ({}, {}), '()', False, 'from gssl.utils import seed\n'), ((16, 8, 16, 18), 'gssl.inductive.datasets.load_ppi', 'load_ppi', ({}, {}), '()', False, 'from gssl.inductive.datasets import load_ppi\n'), ((18, 8, 18, 39), 'gssl.datasets.load_dataset', 'load_dataset', (), '', False, 'from gssl.datasets import load_dataset\n')] |
LukoninDmitryPy/agro_site-2 | agro_site/orders/migrations/0001_initial.py | eab7694d42104774e5ce6db05a79f11215db6ae3 | # Generated by Django 2.2.16 on 2022-04-12 13:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('sales_backend', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('D', 'Dialog'), ('C', 'Chat')], default='D', max_length=1, verbose_name='Тип')),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участник')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=250)),
('postal_code', models.CharField(max_length=20)),
('city', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('paid', models.BooleanField(default=False)),
('status_order', models.CharField(choices=[('В обработке', 'В обработке'), ('Заказ собран', 'Заказ собран'), ('Заказ отправлен', 'Заказ отправлен')], default='В обработке', max_length=20)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Заказы',
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='sales_backend.Product')),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField(verbose_name='Сообщение')),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Дата сообщения')),
('is_readed', models.BooleanField(default=False, verbose_name='Прочитано')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь')),
('chat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Chat', verbose_name='Чат')),
],
options={
'ordering': ['pub_date'],
},
),
migrations.AddConstraint(
model_name='orderitem',
constraint=models.CheckConstraint(check=models.Q(_negated=True, user=django.db.models.expressions.F('seller')), name='dont_buy_yourself'),
),
]
| [((16, 8, 16, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(16, 40, 16, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((23, 23, 23, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((24, 25, 24, 133), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((25, 28, 25, 112), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import migrations, models\n'), ((31, 23, 31, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((32, 28, 32, 60), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((33, 32, 33, 63), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((34, 25, 34, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((35, 28, 35, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((36, 28, 36, 63), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((37, 25, 37, 59), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((38, 33, 38, 283), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((39, 25, 39, 137), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((49, 23, 49, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((50, 26, 50, 78), 'django.db.models.DecimalField', 'models.DecimalField', (), '', False, 'from django.db import migrations, models\n'), ((51, 29, 51, 67), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((52, 26, 52, 129), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((53, 28, 53, 146), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((54, 27, 54, 141), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((55, 25, 55, 144), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((61, 23, 61, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((62, 28, 62, 79), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((63, 29, 63, 128), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((64, 30, 64, 99), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((65, 27, 65, 159), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((66, 25, 66, 128), 'django.db.models.ForeignKey', 
'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
FakeYou/flask-microblog | app/forms.py | 021b786417a2ae1aaa957661beb25d381a7efdb2 | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import InputRequired, Email, EqualTo, Length
class LoginForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
password = PasswordField('password', validators=[InputRequired()])
remember_me = BooleanField('remember_me', default=False)
class RegisterForm(Form):
nickname = StringField('nickname', validators=[InputRequired()])
email = StringField('email', validators=[InputRequired(), Email()])
password = PasswordField('password', validators=[InputRequired(),
EqualTo('confirm', message='Password must match')])
confirm = PasswordField('confirm password')
class NewPostForm(Form):
body = StringField('body', validators=[InputRequired(), Length(max=140)]) | [((8, 18, 8, 60), 'wtforms.BooleanField', 'BooleanField', (), '', False, 'from wtforms import StringField, BooleanField, PasswordField\n'), ((15, 14, 15, 47), 'wtforms.PasswordField', 'PasswordField', ({(15, 28, 15, 46): '"""confirm password"""'}, {}), "('confirm password')", False, 'from wtforms import StringField, BooleanField, PasswordField\n'), ((6, 51, 6, 66), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((7, 53, 7, 68), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((11, 51, 11, 66), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((12, 45, 12, 60), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((12, 62, 12, 69), 'wtforms.validators.Email', 'Email', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((13, 53, 13, 68), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((14, 53, 14, 102), 'wtforms.validators.EqualTo', 'EqualTo', (), '', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((18, 43, 18, 58), 'wtforms.validators.InputRequired', 'InputRequired', ({}, {}), '()', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n'), ((18, 60, 18, 75), 'wtforms.validators.Length', 'Length', (), '', False, 'from wtforms.validators import InputRequired, Email, EqualTo, Length\n')] |
Jimmy01240397/balsn-2021-writeup | ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | 91b71dfbddc1c214552280b12979a82ee1c3cb7e | from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
| [((9, 15, 9, 28), 'random.randint', 'randint', ({(9, 23, 9, 24): '(1)', (9, 26, 9, 27): '(9)'}, {}), '(1, 9)', False, 'from random import randint\n'), ((17, 7, 17, 24), 'random.randint', 'randint', ({(17, 15, 17, 16): '(0)', (17, 18, 17, 23): 'depth'}, {}), '(0, depth)', False, 'from random import randint\n'), ((13, 18, 13, 31), 'random.randint', 'randint', ({(13, 26, 13, 27): '(0)', (13, 29, 13, 30): '(3)'}, {}), '(0, 3)', False, 'from random import randint\n'), ((62, 15, 62, 43), 'ast.parse', 'ast.parse', (), '', False, 'import ast\n')] |
RebeccaHirst/Pyllusion | pyllusion/movement/movement_circles.py | 9944076e38bced0eabb49c607482b71809150bdb | import numpy as np
from .movement_matrix import movement_matrix
from ..image import image_circles
def movement_circles(n=50, duration=2, fps=30, width=500, height=500, **kwargs):
"""
>>> import pyllusion as ill
>>>
>>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05)
>>> #ill.images_to_gif(images, path="mygif.gif", fps=30)
"""
n_frames = int(duration * fps)
x, y = movement_matrix(n_frames=n_frames, **kwargs)
# Generate PIL images
images = []
for i in range(n_frames):
images.append(
image_circles(width=width, height=height, n=n, x=x[i], y=y[i], **kwargs)
)
return images
| [] |
hzwfl2/Semantic-consistent-Embedding | sce.py | d3712cc6f27febbf654e1eb8c43c0b48376a9be1 | #%%
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC,LinearSVC
from torch import device
from torch.optim import optimizer
from torch.utils.data import DataLoader, Dataset
from read_data import create_data
#%%
class my_dataset(Dataset):
def __init__(self,data,attribute_label):
super(my_dataset,self).__init__()
self.data=data
self.attribute_label=attribute_label
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
batch_data=self.data[index]
batch_label=self.attribute_label[index]
return batch_data,batch_label
#%%
device=torch.device('cuda')
np.random.seed(904)
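# Added note: pre_model() fits one per-attribute binary classifier on the (optionally embedded)
# training features, predicts the attribute signature of every test sample, and then assigns the
# class whose attribute vector is nearest in squared error -- a DAP-style zero-shot prediction step.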
def pre_model(model, traindata, train_attributelabel, testdata, testlabel, attribute_matrix):
model_dict = {'rf': RandomForestClassifier(n_estimators=100),'NB': GaussianNB(),'SVC_linear': SVC(kernel='linear'),'LinearSVC':LinearSVC()}
res_list = []
for i in range(train_attributelabel.shape[1]):
clf = model_dict[model]
if max(train_attributelabel[:, i]) != 0:
clf.fit(traindata, train_attributelabel[:, i])
res = clf.predict(testdata)
else:
res = np.zeros(testdata.shape[0])
res_list.append(res.T)
test_pre_attribute = np.mat(np.row_stack(res_list)).T
label_lis = []
for i in range(test_pre_attribute.shape[0]):
pre_res = test_pre_attribute[i, :]
loc = (np.sum(np.square(attribute_matrix - pre_res), axis=1)).argmin()
label_lis.append(np.unique(testlabel)[loc])
label_lis = np.mat(np.row_stack(label_lis))
return test_pre_attribute,label_lis, testlabel
#%%
def off_diagonal(x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
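# Added note: off_diagonal() returns every off-diagonal element of a square matrix; the
# reshape-to-(n-1, n+1) trick is the same helper used in the Barlow Twins reference code.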
#%%
class Embedding_Net(nn.Module):
def __init__(self,dim,lambda_):
super(Embedding_Net,self).__init__()
self.l11=nn.Linear(6,dim[0])
self.l12=nn.Linear(dim[0],dim[1])
self.l13=nn.Linear(2*dim[1],6)
self.l21=nn.Linear(4,dim[0])
self.l22=nn.Linear(dim[0],dim[1])
self.l23=nn.Linear(2*dim[1],4)
self.bn1=nn.BatchNorm1d(dim[0])
self.bn2=nn.BatchNorm1d(dim[1])
self.lambda_=lambda_
def compability_loss(self,z1,z2):
N,D=z1.shape
c=self.bn2(z1).T @ self.bn2(z2)/N
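        # Added note: c is the D x D cross-correlation matrix of the two batch-normalised
        # embeddings; pulling its diagonal towards 1 and suppressing the off-diagonal entries
        # below is the Barlow-Twins-style invariance / redundancy-reduction objective.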
on_diag=torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag=off_diagonal(c).pow_(2).sum()
loss=on_diag+self.lambda_[3]*off_diag
return loss
def compute_loss(self,z1,z2,x,a,x_,a_):
loss_R1=self.lambda_[0]*F.mse_loss(a,a_)
loss_R2=self.lambda_[1]*F.mse_loss(x,x_)
loss_CM=self.compability_loss(z1,z2)
loss_CM=self.lambda_[2]*loss_CM
loss=loss_R1+loss_R2+loss_CM
return loss_R1,loss_R2,loss_CM,loss
def transform(self,x,a):
z1=self.l11(x)
z1=torch.relu(self.bn1(z1))
z1=self.l12(z1)
z2=self.l21(a)
z2=torch.relu(self.bn1(z2))
z2=self.l22(z2)
return z1,z2
def reconstruction(self,z1,z2):
f1=torch.cat([z1,z2],dim=1)
f2=torch.cat([z2,z1],dim=1)
x_=self.l13(f1)
a_=torch.sigmoid(self.l23(f2))
return x_,a_
def forward(self,x,a):
z1,z2=self.transform(x,a)
x_,a_=self.reconstruction(z1,z2)
loss_R1,loss_R2,loss_CM,loss=self.compute_loss(z1,z2,x,a,x_,a_)
package={'z1':z1,'z2':z2,'x':x,'x_':x_,'r1':loss_R1,
'r2':loss_R2,'cm':loss_CM,'loss':loss}
return package
#%%
datapath='data/classData.csv'
modes=['NB'] #'rf'
test_classes={'test_class':[2,3]}
for key,value in test_classes.items():
print('========================================{}:[{}:{}]========================================='.format(modes,key,value))
df = pd.read_csv(datapath)
df['fault_type'] = df['G'].astype('str') + df['C'].astype('str') + df['B'].astype('str') + df['A'].astype('str')
traindata,trainlabel,train_attributelabel, train_attributematrix,testdata,testlabel,test_attributelabel,test_attributematrix,attribute_matrix=create_data(df,value)
_,y_pre,y_true=pre_model(modes[0], traindata, train_attributelabel, testdata, testlabel, test_attributematrix)
original_acc=accuracy_score(y_pre,y_true)
traindata=torch.from_numpy(traindata).float().to(device)
label=torch.from_numpy(trainlabel.squeeze()).long().to(device)
testdata=torch.from_numpy(testdata).float().to(device)
batch_size=400
trainset=my_dataset(traindata,torch.from_numpy(train_attributelabel).float().to(device))
train_loader=DataLoader(trainset,batch_size=batch_size,shuffle=True)
lambda_=[1,1e-5,1,0.25]
dim=[6,12]
model=Embedding_Net(dim,lambda_=lambda_)
model.to(device)
optimizer=optim.RMSprop(model.parameters(),lr=1e-2)
L1,L2,L3,L=[],[],[],[]
model.train()
accs=[]
best_acc=0
for epoch in range(200):
model.train()
for batch,(batch_data,batch_label) in enumerate(train_loader):
optimizer.zero_grad()
package=model(batch_data,batch_label)
loss_R1,loss_R2,loss_CM,loss=package['r1'],package['r2'],package['cm'],package['loss']
loss.backward()
optimizer.step()
L1.append(loss_R1.item())
L2.append(loss_R2.item())
L3.append(loss_CM.item())
L.append(loss.item())
model.eval()
with torch.no_grad():
train_package=model(traindata,torch.from_numpy(train_attributelabel).float().to(device))
f_train=train_package['z1']
f_train=torch.cat([f_train,traindata],dim=1).detach().cpu().numpy()
test_package=model(testdata,torch.from_numpy(test_attributelabel).float().to(device))
f_test=test_package['z1']
f_test=torch.cat([f_test,testdata],dim=1).detach().cpu().numpy()
test_preattribute,label_lis, testlabel=pre_model(modes[0], f_train, train_attributelabel, f_test, testlabel, test_attributematrix)
acc=accuracy_score(label_lis, testlabel)
accs.append(acc)
if acc>best_acc:
best_acc=acc
print('epoch:{:d}, best_acc:{:.4f}'.format(epoch,best_acc))
print('finished! FDAT:{:.4f}, SCE:{:.4f}'.format(original_acc,best_acc))
# %% | [((36, 7, 36, 27), 'torch.device', 'torch.device', ({(36, 20, 36, 26): '"""cuda"""'}, {}), "('cuda')", False, 'import torch\n'), ((38, 0, 38, 19), 'numpy.random.seed', 'np.random.seed', ({(38, 15, 38, 18): '(904)'}, {}), '(904)', True, 'import numpy as np\n'), ((140, 9, 140, 30), 'pandas.read_csv', 'pd.read_csv', ({(140, 21, 140, 29): 'datapath'}, {}), '(datapath)', True, 'import pandas as pd\n'), ((142, 146, 142, 167), 'read_data.create_data', 'create_data', ({(142, 158, 142, 160): 'df', (142, 161, 142, 166): 'value'}, {}), '(df, value)', False, 'from read_data import create_data\n'), ((145, 17, 145, 45), 'sklearn.metrics.accuracy_score', 'accuracy_score', ({(145, 32, 145, 37): 'y_pre', (145, 38, 145, 44): 'y_true'}, {}), '(y_pre, y_true)', False, 'from sklearn.metrics import accuracy_score\n'), ((153, 17, 153, 72), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Dataset\n'), ((41, 24, 41, 64), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (), '', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((41, 71, 41, 83), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ({}, {}), '()', False, 'from sklearn.naive_bayes import GaussianNB\n'), ((41, 98, 41, 118), 'sklearn.svm.SVC', 'SVC', (), '', False, 'from sklearn.svm import SVC, LinearSVC\n'), ((41, 131, 41, 142), 'sklearn.svm.LinearSVC', 'LinearSVC', ({}, {}), '()', False, 'from sklearn.svm import SVC, LinearSVC\n'), ((59, 23, 59, 46), 'numpy.row_stack', 'np.row_stack', ({(59, 36, 59, 45): 'label_lis'}, {}), '(label_lis)', True, 'import numpy as np\n'), ((73, 17, 73, 36), 'torch.nn.Linear', 'nn.Linear', ({(73, 27, 73, 28): '6', (73, 29, 73, 35): 'dim[0]'}, {}), '(6, dim[0])', True, 'import torch.nn as nn\n'), ((74, 17, 74, 41), 'torch.nn.Linear', 'nn.Linear', ({(74, 27, 74, 33): 'dim[0]', (74, 34, 74, 40): 'dim[1]'}, {}), '(dim[0], dim[1])', True, 'import torch.nn as nn\n'), ((75, 17, 75, 38), 'torch.nn.Linear', 'nn.Linear', ({(75, 27, 75, 35): '2 * dim[1]', (75, 36, 75, 37): '6'}, {}), '(2 * dim[1], 6)', True, 'import torch.nn as nn\n'), ((77, 17, 77, 36), 'torch.nn.Linear', 'nn.Linear', ({(77, 27, 77, 28): '4', (77, 29, 77, 35): 'dim[0]'}, {}), '(4, dim[0])', True, 'import torch.nn as nn\n'), ((78, 17, 78, 41), 'torch.nn.Linear', 'nn.Linear', ({(78, 27, 78, 33): 'dim[0]', (78, 34, 78, 40): 'dim[1]'}, {}), '(dim[0], dim[1])', True, 'import torch.nn as nn\n'), ((79, 17, 79, 38), 'torch.nn.Linear', 'nn.Linear', ({(79, 27, 79, 35): '2 * dim[1]', (79, 36, 79, 37): '4'}, {}), '(2 * dim[1], 4)', True, 'import torch.nn as nn\n'), ((81, 17, 81, 39), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ({(81, 32, 81, 38): 'dim[0]'}, {}), '(dim[0])', True, 'import torch.nn as nn\n'), ((82, 17, 82, 39), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ({(82, 32, 82, 38): 'dim[1]'}, {}), '(dim[1])', True, 'import torch.nn as nn\n'), ((117, 11, 117, 35), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((118, 11, 118, 35), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((50, 18, 50, 45), 'numpy.zeros', 'np.zeros', ({(50, 27, 50, 44): 'testdata.shape[0]'}, {}), '(testdata.shape[0])', True, 'import numpy as np\n'), ((52, 32, 52, 54), 'numpy.row_stack', 'np.row_stack', ({(52, 45, 52, 53): 'res_list'}, {}), '(res_list)', True, 'import numpy as np\n'), ((99, 32, 99, 48), 'torch.nn.functional.mse_loss', 'F.mse_loss', ({(99, 43, 99, 44): 'a', (99, 45, 99, 47): 'a_'}, {}), '(a, a_)', True, 'import torch.nn.functional as F\n'), ((100, 32, 100, 48), 
'torch.nn.functional.mse_loss', 'F.mse_loss', ({(100, 43, 100, 44): 'x', (100, 45, 100, 47): 'x_'}, {}), '(x, x_)', True, 'import torch.nn.functional as F\n'), ((184, 13, 184, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((194, 16, 194, 52), 'sklearn.metrics.accuracy_score', 'accuracy_score', ({(194, 31, 194, 40): 'label_lis', (194, 42, 194, 51): 'testlabel'}, {}), '(label_lis, testlabel)', False, 'from sklearn.metrics import accuracy_score\n'), ((58, 25, 58, 45), 'numpy.unique', 'np.unique', ({(58, 35, 58, 44): 'testlabel'}, {}), '(testlabel)', True, 'import numpy as np\n'), ((57, 22, 57, 59), 'numpy.square', 'np.square', ({(57, 32, 57, 58): 'attribute_matrix - pre_res'}, {}), '(attribute_matrix - pre_res)', True, 'import numpy as np\n'), ((147, 14, 147, 41), 'torch.from_numpy', 'torch.from_numpy', ({(147, 31, 147, 40): 'traindata'}, {}), '(traindata)', False, 'import torch\n'), ((150, 13, 150, 39), 'torch.from_numpy', 'torch.from_numpy', ({(150, 30, 150, 38): 'testdata'}, {}), '(testdata)', False, 'import torch\n'), ((152, 34, 152, 72), 'torch.from_numpy', 'torch.from_numpy', ({(152, 51, 152, 71): 'train_attributelabel'}, {}), '(train_attributelabel)', False, 'import torch\n'), ((92, 16, 92, 33), 'torch.diagonal', 'torch.diagonal', ({(92, 31, 92, 32): 'c'}, {}), '(c)', False, 'import torch\n'), ((185, 42, 185, 80), 'torch.from_numpy', 'torch.from_numpy', ({(185, 59, 185, 79): 'train_attributelabel'}, {}), '(train_attributelabel)', False, 'import torch\n'), ((189, 40, 189, 77), 'torch.from_numpy', 'torch.from_numpy', ({(189, 57, 189, 76): 'test_attributelabel'}, {}), '(test_attributelabel)', False, 'import torch\n'), ((187, 20, 187, 56), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((191, 19, 191, 53), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n')] |
colirain/GraphSAGE | graphsage/partition_predict.py | a63145ff18f87cb69340c7b457c34839e9124086 |
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
def predict(train_data, id_map):
num_classes = 3
placeholders = construct_placeholders(num_classes)
placeholders['features'] = train_data
# feed_dict = dict()
# train_data = train_data.astype('float32')
# feed_dict.update({placeholders['features']: train_data})
dim = []
# print("f:{}".format(len(train_data[0])))
dim.append(len(train_data[0]))
dim.append(FLAGS.dim_1)
dim.append(num_classes)
model = FCPartition(placeholders, dim)
sess = tf.Session()
model.load(sess)
results = model.predict()
results_np = results.eval(session=sess)
# print(results.eval(session=sess))
# print(results_np.shape)
id_map = id_map.astype('int')
results_np = np.expand_dims(results_np, axis=1)
results_np = np.insert(results_np, 0, id_map, axis=1)
results_np = results_np[results_np[:,0].argsort()]
print(results_np)
np.save(FLAGS.outDir+'/predict_predict.npy', results_np)
def main():
print("load data ...")
train_data = load_embedded_data(FLAGS.train_prefix)
id_map = load_embedded_idmap(FLAGS.train_prefix)
predict(train_data, id_map)
if __name__ == '__main__':
main() | [((29, 19, 29, 54), 'graphsage.partition_train.construct_placeholders', 'construct_placeholders', ({(29, 42, 29, 53): 'num_classes'}, {}), '(num_classes)', False, 'from graphsage.partition_train import construct_placeholders\n'), ((39, 12, 39, 42), 'graphsage.models.FCPartition', 'FCPartition', ({(39, 24, 39, 36): 'placeholders', (39, 38, 39, 41): 'dim'}, {}), '(placeholders, dim)', False, 'from graphsage.models import FCPartition\n'), ((40, 11, 40, 23), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((47, 17, 47, 51), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((48, 17, 48, 57), 'numpy.insert', 'np.insert', (), '', True, 'import numpy as np\n'), ((51, 4, 51, 60), 'numpy.save', 'np.save', ({(51, 12, 51, 47): "(FLAGS.outDir + '/predict_predict.npy')", (51, 49, 51, 59): 'results_np'}, {}), "(FLAGS.outDir + '/predict_predict.npy', results_np)", True, 'import numpy as np\n'), ((56, 17, 56, 55), 'graphsage.utils.load_embedded_data', 'load_embedded_data', ({(56, 36, 56, 54): 'FLAGS.train_prefix'}, {}), '(FLAGS.train_prefix)', False, 'from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap\n'), ((57, 13, 57, 52), 'graphsage.utils.load_embedded_idmap', 'load_embedded_idmap', ({(57, 33, 57, 51): 'FLAGS.train_prefix'}, {}), '(FLAGS.train_prefix)', False, 'from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap\n')] |
AmmarQaseem/CPI-Pipeline-test | scripts/generate_XML_files/DS1/annotatedsen_to_xml.py | 3866883c54d7bd77753ee4b72997949bdcf76359 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015, Elham Abbasian <[email protected]>, Kersten Doering <[email protected]>
This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol).
"""
# module to make use of regular expressions
import re
# set the default encoding to utf8 and ignore all decoding/encoding steps.
# (ToDo: check whether the encoding command is needed - debug)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# optparse - Parser for command-line options
from optparse import OptionParser
# import this function to add quotation marks around the input text and ignore the extra quotations inside the sentence text
#from xml.sax.saxutils import escape # (ToDo: not needed - debug)
from xml.sax.saxutils import quoteattr
### MAIN PART OF THE SCRIPT ###
if __name__=="__main__":
# configure parsing of command-line arguments
parser= OptionParser()
parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv")
parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml")
(options,args)=parser.parse_args()
# save parameters in an extra variable
input_file= options.i
output_file = options.o
# open input file with annotated sentences
infile = open(input_file,"r")
# open output file
outfile = open(output_file,"w")
#example for the input format:
#18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction
#example for the output format
"""
<?xml version="1.0" encoding="UTF-8">
<corpus source="DS1">
<document id="DS1.d0" origId="18227838">
<sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/>
<entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/>
<entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/>
<entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/>
<interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" />
<interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" />
</sentence>
[...]
</document>
[...]
</corpus>
"""
# add XML header and define corpus source
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outfile.write("<corpus source=\"DS1\">"+"\n")
# variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not
# the document ID refers to the PubMed ID (origID)
pre_pmid=""
# doc_num counts the number of created documents
doc_num =0
# read lines in CSV file
for line in infile :
# tab-separated format
temp = line.strip().split("\t")
# get PubMed ID, sentences ID, and the sentence itself
# (ToDo: use a split command instead of this regular expression - debug)
curr_pmid = re.match('(\d{8})',temp[0]).group(0)
pmid_sent_num = temp[0]
sentence_text = temp[1]
# find all annotated proteins and compounds by matching their tags
pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))]
cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))]
# join the two lists
positions = pro_positions + cmp_positions
positions.sort()
#Initialize the list with the number of identified tags
entity_list =[]
entity_list=[0]*len(positions)
# iterate over all identified positions of the identified tags
for i in range(len(positions)):
            # initialize the second dimension of the list with a length of four (entity_type, entity_id, entity_text, entity_charoffset)
entity_list[i]=[0]*4
# store these four elements with grouping in the regular expression
obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]])
entity_list[i][0]=obj.group(1) #entity_type
entity_list[i][1]=obj.group(2) #entity_id
entity_list[i][2]=obj.group(3) #entity_text
entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")")
            # the entity_charoffset will be assigned later, after the pure sentence text has been generated (without any tags)
# the sentence without any tags will be generated by deleting all tags via text concatenation
# initialize (ToDo: initialization like this not needed - debug)
pur_sent_text = sentence_text
# enumerate over the list of positions (index, value)
for i,e in reversed(list(enumerate(positions))):
pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:]
# get the character offset of all identified synonyms
# decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc.
# make use of a list of repeated synonyms and synonym positions
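        # Added note: if a synonym string occurs several times in the sentence, the first
        # annotated mention gets the offsets of the first textual match and every further
        # mention consumes the next stored (start, end) pair from repeated_syn_pos.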
repeated_syn_pos =[]
rep_syn =[]
for i in range(len(entity_list)) :
# check whether this is the fist occurrence of the current synonym
if not entity_list[i][2] in rep_syn :
# get the list of positions of all occurences of the current synonym
u_pur_sent_text = pur_sent_text.decode("utf8")
charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))]
# check whether it occures only once such that the charoffsetone directly be assigned
if len(charoffset_value) == 1 :
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
else:
# if it occures more than one time, the charoffset has to be assigned according to the first pair of positions
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
# append this synonym to the rep_syn list to store all repeated synonyms in this sentence
rep_syn.append(entity_list[i][2])
# delete the fist pair of positions from the list
charoffset_value = charoffset_value[1:]
# add the rest of positions pairs for the current synonym to another list
for j in range(len(charoffset_value)):
repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]])
else:
# this case refers to at least the second occurrence of the synonym
# for each repeated synonym, assign the first position pair from the repeated_syn_pos list
for k in range(len(repeated_syn_pos)):
if repeated_syn_pos[k][0] == entity_list[i][2]:
break
entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2])
# get pairs and their interaction status (separated by a double underscore)
listof_int_noint = temp[2:]
interaction_list=[0]*len(listof_int_noint)
for i in range(len(listof_int_noint)):
interaction_list[i]=listof_int_noint[i].split('__')
# interaction/no_interaction corresponds to True/False
TF_int_list=[0]*len(interaction_list)
for intid in range(len(interaction_list)) :
if interaction_list[intid][2]=="interaction" :
TF_int_list[intid]="True"
else :
TF_int_list[intid]="False"
# debug:
# print TF_int_list
# build XML structure
# check whether the PubMed ID changed in comparision to the last parsed sentence
if curr_pmid == pre_pmid :
# if this is the case, only the sentence ID has to be increased
sent_num +=1
# add sentence ID using the current document number
# (doc_num has to be decreased by one, because this index is automatically increased after each sentence)
            # all opening and closing square brackets ([,]) should be replaced with round brackets, because they cause problems in the tokenization step of the (preprocessing) pipeline
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# build entity tags according to the list identified tags from the CSV file (entity_list)
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# insert types of interaction for each pair of entities
# get the index of the synonym interactions in entity_list
origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# if the current PubMed ID changed in comparison to the last parsed sentences
else :
if not doc_num == 0 :
outfile.write(" </document>\n")
sent_num =0
# a new document tag has to be opened and the sentences can be added
outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n")
# replace squared brackets ([,]) with round brackets
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# now have to make entity tags according to entity_list data.
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# build entity tags
origId = "DS1.d"+str(doc_num)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# set new PubMed ID as the last parsed document ID and increase document index
pre_pmid = curr_pmid
doc_num+=1
# close document tag
outfile.write("</document>\n")
# close corpus tag
outfile.write("</corpus>\n")
# close files
infile.close()
outfile.close()
| [((17, 0, 17, 31), 'sys.setdefaultencoding', 'sys.setdefaultencoding', ({(17, 23, 17, 30): '"""utf-8"""'}, {}), "('utf-8')", False, 'import sys\n'), ((29, 12, 29, 26), 'optparse.OptionParser', 'OptionParser', ({}, {}), '()', False, 'from optparse import OptionParser\n'), ((96, 18, 96, 140), 're.match', 're.match', ({(96, 27, 96, 92): '"""<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>"""', (96, 93, 96, 139): 'sentence_text[positions[i][0]:positions[i][1]]'}, {}), '(\'<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>\',\n sentence_text[positions[i][0]:positions[i][1]])', False, 'import re\n'), ((79, 20, 79, 47), 're.match', 're.match', ({(79, 29, 79, 38): '"""(\\\\d{8})"""', (79, 39, 79, 46): 'temp[0]'}, {}), "('(\\\\d{8})', temp[0])", False, 'import re\n'), ((83, 60, 83, 127), 're.finditer', 're.finditer', ({(83, 72, 83, 112): '"""<protein-id="(.*?)">(.*?)</protein-id>"""', (83, 113, 83, 126): 'sentence_text'}, {}), '(\'<protein-id="(.*?)">(.*?)</protein-id>\', sentence_text)', False, 'import re\n'), ((84, 61, 84, 130), 're.finditer', 're.finditer', ({(84, 73, 84, 115): '"""<compound-id="(.*?)">(.*?)</compound-id>"""', (84, 116, 84, 129): 'sentence_text'}, {}), '(\'<compound-id="(.*?)">(.*?)</compound-id>\', sentence_text)', False, 'import re\n'), ((168, 136, 168, 160), 'xml.sax.saxutils.quoteattr', 'quoteattr', ({(168, 146, 168, 159): 'pur_sent_text'}, {}), '(pur_sent_text)', False, 'from xml.sax.saxutils import quoteattr\n'), ((199, 134, 199, 158), 'xml.sax.saxutils.quoteattr', 'quoteattr', ({(199, 144, 199, 157): 'pur_sent_text'}, {}), '(pur_sent_text)', False, 'from xml.sax.saxutils import quoteattr\n'), ((120, 83, 120, 111), 're.escape', 're.escape', ({(120, 93, 120, 110): 'entity_list[i][2]'}, {}), '(entity_list[i][2])', False, 'import re\n')] |
SergeyDorokhov/python_training | tests/test_add_contact.py | e15e561fe7ad055048643adcfc88b3f2d55530ca | def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
list_before = db.get_contact_list()
contact.id_contact = app.contact.get_next_id(list_before)
app.contact.create(contact)
assert len(list_before) + 1 == len(db.get_contact_list())
list_after = db.get_contact_list()
list_before.append(contact)
assert sorted(list_before) == sorted(list_after)
if check_ui:
assert sorted(list_after) == sorted(app.contact.get_list()) | [] |
eamanu/asoc_members | website/members/urls.py | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from members import views
urlpatterns = [
path('solicitud-alta/', views.signup_initial, name='signup'),
path('solicitud-alta/persona/', views.signup_form_person, name='signup_person'),
path('solicitud-alta/organizacion',
views.signup_form_organization, name='signup_organization'),
path('solicitud-alta/gracias', views.signup_thankyou, name='signup_thankyou'),
path('reportes/', views.reports_main, name='reports_main'),
path('reportes/deudas', views.report_debts, name='report_debts'),
path('reportes/completos', views.report_complete, name='report_complete'),
path('reportes/incompletos', views.report_missing, name='report_missing'),
path('reportes/ingcuotas', views.report_income_quotas, name='report_income_quotas'),
path('reportes/ingdinero', views.report_income_money, name='report_income_money'),
path('reportes/miembros', views.members_list, name="members_list"),
path('reportes/miembros/<pk>/', views.member_detail, name='member_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [((24, 4, 24, 65), 'django.conf.urls.static.static', 'static', (), '', False, 'from django.conf.urls.static import static\n'), ((8, 4, 8, 64), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((9, 4, 9, 83), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((10, 4, 11, 68), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((12, 4, 12, 81), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((14, 4, 14, 62), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((15, 4, 15, 68), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((16, 4, 16, 77), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((17, 4, 17, 77), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((18, 4, 18, 87), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((19, 4, 19, 85), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((22, 4, 22, 70), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((23, 4, 23, 78), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n')] |
vais-ral/CCPi-ML | Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py | ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 14:04:03 2018
@author: zyv57124
"""
import scipy.io as sio
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.python.training import gradient_descent
from time import time
class TimingCallback(keras.callbacks.Callback):
def __init__(self):
self.logs=[]
def on_epoch_begin(self, epoch, logs={}):
self.starttime=time()
def on_epoch_end(self, epoch, logs={}):
self.logs.append(time()-self.starttime)
#Load data ------------------------------------------------------
def loadMATData(file1):
return sio.loadmat(file1)
#Load Data-------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
filter = labels ==10
labels[filter] = 0
#shuffle data---------------------------------------------------
ran = np.arange(features.shape[0])
np.random.shuffle(ran)
features = features[ran]
labels = labels[ran]
training_features = features[:3500]
training_labels = labels[:3500]
test_features = features[3501:]
test_labels = labels[3501:]
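# Sweep batch sizes 1, 11, 21, ..., 491: train a fresh model for 100 epochs at each
# size and log the per-epoch loss and wall-clock time for later comparison.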
for i in np.arange(0,500, 10):
    #TF Neural Network Builder--------------------------------------
model = keras.Sequential([
keras.layers.Dense(400, activation=tf.nn.relu),
keras.layers.Dense(25, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
predictions = model.predict(test_features)
cb=TimingCallback()
history = model.fit(training_features, training_labels, batch_size=i+1, epochs=100, verbose=2, callbacks=[cb])
    #Store epoch number and loss values in a .txt file
loss_data = (history.history['loss'])
f = open("TF_loss_data_batchnum_"+str(i+1)+".txt","w")
for xx in range(1,len(loss_data)+1):
if xx==1:
delta_loss = 'Nan'
else:
delta_loss = (loss_data[xx-2] - loss_data[xx-1])
#Epoch #Loss #Batch size #Time #Change in loss
f.write(str(xx) + "," + str(loss_data[xx-1]) + "," + str(i+1) + "," + str(cb.logs[xx-1]) + "," + str(delta_loss) + "\n" )
    f.close() | [((72, 6, 72, 34), 'numpy.arange', ({(72, 16, 72, 33): 'features.shape[0]'}, {}), '(features.shape[0])', True, 'import numpy as np\n'), ((74, 0, 74, 22), 'numpy.random.shuffle', 'np.random.shuffle', ({(74, 18, 74, 21): 'ran'}, {}), '(ran)', True, 'import numpy as np\n'), ((92, 9, 92, 29), 'numpy.arange', 'np.arange', ({(92, 19, 92, 20): '(0)', (92, 21, 92, 24): '(500)', (92, 26, 92, 28): '(10)'}, {}), '(0, 500, 10)', True, 'import numpy as np\n'), ((46, 11, 46, 29), 'scipy.io.loadmat', 'sio.loadmat', ({(46, 23, 46, 28): 'file1'}, {}), '(file1)', True, 'import scipy.io as sio\n'), ((36, 19, 36, 25), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((100, 12, 100, 58), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (), '', False, 'from tensorflow import keras\n'), ((102, 12, 102, 57), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (), '', False, 'from tensorflow import keras\n'), ((104, 12, 104, 60), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (), '', False, 'from tensorflow import keras\n'), ((110, 28, 110, 81), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (), '', True, 'import tensorflow as tf\n'), ((40, 21, 40, 27), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')]
aurimas13/Python-stuff | Exercise_8.py | a6e89e9f6088a6ab29da5b57830e4b7750427454 | # Solution of Exercise 8 - Exercise_8.py
#
# Uploaded by Aurimas A. Nausedas on 11/23/20.
# Updated by Aurimas A. Nausedas on 11/06/21.
formatter = "%r %r %r %r"
print(formatter % (1, 2, 3, 4))
print(formatter % ("one", "two", "three", "four"))
print(formatter % (True, False, False, True))
print(formatter % (formatter, formatter, formatter, formatter))
print(formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight."
))
| [] |
MCFrank16/python-algo | Easy/two-numbers-sum/solution-1.py | dd48f6c5b9f4a941a18fc4620164c807c0e1d35e | # solution 1: Brute Force
# time complexity: O(n^2)
# space complexity: O(1)
def twoNumberSum(arr, n):
for i in range(len(arr) - 1):
firstNum = arr[i]
for j in range(i + 1, len(arr)):
secondNum = arr[j]
if firstNum + secondNum == n:
return [firstNum, secondNum]
return []
print(twoNumberSum([3,5,-4,8,11,1,-1,6], 10))
| [] |
maurizi/cac-tripplanner | python/cac_tripplanner/destinations/migrations/0021_event.py | 3f4f1f1edc9be9e52c74eb3e124b6697429a79d6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 17:32
from __future__ import unicode_literals
import ckeditor.fields
import destinations.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('destinations', '0020_auto_20170203_1251'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('website_url', models.URLField(blank=True, null=True)),
('description', ckeditor.fields.RichTextField()),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('image', models.ImageField(help_text=b'The small image. Will be displayed at 310x155.', null=True, upload_to=destinations.models.generate_filename)),
('wide_image', models.ImageField(help_text=b'The large image. Will be displayed at 680x400.', null=True, upload_to=destinations.models.generate_filename)),
('published', models.BooleanField(default=False)),
('priority', models.IntegerField(default=9999)),
('destination', models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL, to='destinations.Destination')),
],
options={
'ordering': ['priority', '-start_date'],
},
),
]
| [((21, 23, 21, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((22, 25, 22, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((23, 32, 23, 70), 'django.db.models.URLField', 'models.URLField', (), '', False, 'from django.db import migrations, models\n'), ((25, 31, 25, 53), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((26, 29, 26, 51), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((27, 26, 27, 164), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((28, 31, 28, 169), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((29, 30, 29, 64), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((30, 29, 30, 62), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((31, 32, 31, 149), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
elpidakon/CRESCENDDI | data_extraction/scripts/bnf_adr_extraction.py | ab9e65621d331689f4aaeeb08902f29d90b7d1b9 | # Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant
# adverse drug-drug interactions (2021)
# Code to extract single-drug side effect data from the BNF website
from bs4 import BeautifulSoup
import urllib.request
import urllib.error
import os, csv
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
URL_BEGINNING = 'https://bnf.nice.org.uk/drug/'
print('beginning scrape for individual drugs...')
# Fetch the HTML containing the full list of APIs.
r = urllib.request.urlopen(URL_BEGINNING).read()
soup1 = BeautifulSoup(r, 'lxml')
# Extract the full URL list.
URL_list = []
for s in soup1.find_all('div', {'class': 'span11'}):
for ai in s(href=True):
temp = URL_BEGINNING + ai['href']
URL_list.append(temp)
print(URL_list)
# Create an empty dataframe for storing the extracted data for APIs.
scraped_API_count = 0
scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str)
row_count = 0
# Empty list to store API mappings to their drug class (if applicable).
API_to_drugclass = []
# Scrape individual drug (API) side effects.
HIGHEST_API_ID = len(URL_list)
for id in tqdm(range(0, HIGHEST_API_ID)):
# Try to fetch the HTML for each API.
try:
l = urllib.request.urlopen(URL_list[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped products.
scraped_API_count += 1
soup2 = BeautifulSoup(l, 'lxml')
API = soup2.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
# In case the API contains a side effect section.
if soup2.find('section', {'id':'sideEffects'}):
ae_list = soup2.find_all('span', {'class': 'sideEffect'})
for a in ae_list:
adv_event = a.getText()
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_API.at[row_count, 'Frequency'] = freq
row_count += 1
# Check if the drug belongs to a specific drug class. If yes, extract
# the drug class name and the link to the corresponding webpage.
if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')):
temp = []
temp.append(API)
drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText()
temp.append(drug_class)
li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href']
drug_class_link = 'https://bnf.nice.org.uk' + str(li)
temp.append(drug_class_link)
API_to_drugclass.append(temp)
# In case the API does not contain a side effect section.
else:
adv_event = 'NO AEs MENTIONED'
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
scraped_API.at[row_count,'Frequency'] = ''
row_count += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip()
scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip()
scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip()
print('BNF individual side effects successfully scraped.')
print('beginning scrape for drug classes...')
# Create a dataframe with drug names, drug classes and related URLs (where applicable).
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link'])
# Create a list with all the links for the drug class webpages.
class_links = API_class_df['Link'].unique().tolist()
# Scrape drug class side effects.
HIGHEST_DRUG_CLASS_ID = len(class_links)
scraped_class_count = 0
# Create an empty dataframe for storing the extracted data for drug classes.
scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str)
row_count_2 = 0
for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)):
# Try to fetch the HTML for each drug class.
try:
l = urllib.request.urlopen(class_links[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped drug classes.
scraped_class_count += 1
soup3 = BeautifulSoup(l, 'lxml')
# Extract the drug class name.
class_name = soup3.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
class_ae_list = soup3.find_all('span', {'class': 'sideEffect'})
for a in class_ae_list:
adv_event = a.getText()
scraped_class.at[row_count_2, 'Drug_Class'] = class_name
scraped_class.at[row_count_2,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_class.at[row_count_2, 'Frequency'] = freq
row_count_2 += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip()
scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip()
scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip()
print('BNF drug class side effects successfully scraped.')
print('combine extracted data...')
## Combine both tables by adding drug class side effects to the individual
## ingredients of each drug class.
# Create a dictionary that contains all drug classes as keys and side effects
# with associated frequencies as values.
AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict()
# Remove URL column
API_class_df.drop(columns = 'Link', inplace = True)
# Create a dataframe with drug class as the index of APIs (if available)
# and add their drug class side effects and associated frequencies.
API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip()
API_class_df.set_index('Drug_Class', inplace = True)
API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict)
API_class_df.reset_index(inplace=True)
# Create a new dataframe to store drug class side effect data for each API.
AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True)
AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index)
AEs_from_class_df['from_drug_class'] = 'Yes'
AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True)
# Fill NAs in Frequency column if no side effects are mentioned.
scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A'
# Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'.
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')
# Concatenate the two dataframes to get a final one.
final_df = pd.concat([scraped_API_dropna, AEs_from_class_df])
# Remove any rows that do not contain side effects.
final_df = final_df[final_df.AE != 'NO AEs MENTIONED']
# Convert dataframe to lowercase.
final_df = final_df.apply(lambda x: x.astype(str).str.lower())
# Sort alphabetically.
final_df = final_df.sort_values(by=['API', 'from_drug_class'])
# Remove any duplicates.
final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True)
# Rename columns.
final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class']
FILE_NAME = 'data_extraction/output/bnf_single_data.csv'
print('saving to file...')
# Save the dataset to a csv file.
final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
| [((19, 8, 19, 32), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(19, 22, 19, 23): 'r', (19, 25, 19, 31): '"""lxml"""'}, {}), "(r, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((98, 15, 98, 84), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((174, 40, 174, 109), 'numpy.where', 'np.where', ({(174, 49, 174, 95): "scraped_API_dropna['AE'] == 'NO AEs MENTIONED'", (174, 97, 174, 102): '"""N/A"""', (174, 104, 174, 108): '"""No"""'}, {}), "(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')", True, 'import numpy as np\n'), ((177, 11, 177, 61), 'pandas.concat', 'pd.concat', ({(177, 21, 177, 60): '[scraped_API_dropna, AEs_from_class_df]'}, {}), '([scraped_API_dropna, AEs_from_class_df])', True, 'import pandas as pd\n'), ((54, 12, 54, 36), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(54, 26, 54, 27): 'l', (54, 29, 54, 35): '"""lxml"""'}, {}), "(l, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((125, 12, 125, 36), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(125, 26, 125, 27): 'l', (125, 29, 125, 35): '"""lxml"""'}, {}), "(l, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((18, 4, 18, 41), 'urllib.request.urlopen', 'urllib.request.urlopen', ({(18, 27, 18, 40): 'URL_BEGINNING'}, {}), '(URL_BEGINNING)', False, 'import urllib\n'), ((44, 12, 44, 48), 'urllib.request.urlopen', 'urllib.request.urlopen', ({(44, 35, 44, 47): 'URL_list[id]'}, {}), '(URL_list[id])', False, 'import urllib\n'), ((69, 72, 69, 103), 're.compile', 're.compile', ({(69, 83, 69, 102): '""".*/drug-class/.*"""'}, {}), "('.*/drug-class/.*')", False, 'import re\n'), ((115, 12, 115, 51), 'urllib.request.urlopen', 'urllib.request.urlopen', ({(115, 35, 115, 50): 'class_links[id]'}, {}), '(class_links[id])', False, 'import urllib\n'), ((74, 78, 74, 109), 're.compile', 're.compile', ({(74, 89, 74, 108): '""".*/drug-class/.*"""'}, {}), "('.*/drug-class/.*')", False, 'import re\n'), ((72, 48, 72, 79), 're.compile', 're.compile', ({(72, 59, 72, 78): '""".*/drug-class/.*"""'}, {}), "('.*/drug-class/.*')", False, 'import re\n')] |
nicoknoll/howimetcorona | core/forms.py | c55198118b2c31ee8b76c023b5a9fc4454cc1e08 | from django import forms
class BaseFileForm(forms.Form):
# we try to minify the file to only submit the data
points_file = forms.FileField(
required=False,
widget=forms.FileInput(attrs={'required': 'required'}),
label="Location History File (.json)"
)
points_data = forms.CharField(widget=forms.HiddenInput(), required=False)
def clean(self):
points_file = self.cleaned_data.get('points_file')
points_data = self.cleaned_data.get('points_data')
if not points_file and not points_data:
raise forms.ValidationError({'points_file': 'File is required.'})
return self.cleaned_data
class ReportForm(BaseFileForm):
symptoms_at = forms.DateField(widget=forms.TextInput(attrs={
'placeholder': 'YYYY-MM-DD',
'pattern': '[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}',
'title': 'YYYY-MM-DD'
}))
is_verified = forms.BooleanField(required=False)
class CheckForm(BaseFileForm):
pass
class DeleteForm(forms.Form):
delete_token = forms.CharField(label="Delete token")
| [((29, 18, 29, 52), 'django.forms.BooleanField', 'forms.BooleanField', (), '', False, 'from django import forms\n'), ((37, 19, 37, 56), 'django.forms.CharField', 'forms.CharField', (), '', False, 'from django import forms\n'), ((8, 15, 8, 62), 'django.forms.FileInput', 'forms.FileInput', (), '', False, 'from django import forms\n'), ((11, 41, 11, 60), 'django.forms.HiddenInput', 'forms.HiddenInput', ({}, {}), '()', False, 'from django import forms\n'), ((18, 18, 18, 77), 'django.forms.ValidationError', 'forms.ValidationError', ({(18, 40, 18, 76): "{'points_file': 'File is required.'}"}, {}), "({'points_file': 'File is required.'})", False, 'from django import forms\n'), ((24, 41, 28, 6), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n')] |
autiwg/bartender | bartender/drinks/generators.py | 1c26aefb777a01ce527745c543e60b11a972fe5d | from django.utils import timezone
from django.utils.text import slugify
def generate_billed_document_path(instance, filename):
cur_time = timezone.now()
return f"{cur_time.strftime('%Y/%m')}/{slugify(instance.name)}-{cur_time.strftime('%d.%m.%Y %H:%M')}.csv"
| [((6, 15, 6, 29), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((8, 43, 8, 65), 'django.utils.text.slugify', 'slugify', ({(8, 51, 8, 64): 'instance.name'}, {}), '(instance.name)', False, 'from django.utils.text import slugify\n')] |
AMReX-Astro/wdmerger | papers/wdmerger_I/plots/sponge.py | 9f575efacc8d373b6d2961f731e30bf59ee15ffd | # This Python program is used to create a plot displaying the sponge
# function we use in the CASTRO hydrodynamics for the wdmerger problem.
import numpy as np
import matplotlib.pyplot as plt
def sponge(r, rs, rt):
    """Sponge factor f_S: 0 for r < rs, a smooth cosine ramp on [rs, rt), and 1 for r >= rt."""
    return np.where(r < rs, 0.0,
                    np.where(r < rt, 0.5 * (1.0 - np.cos(np.pi * (r - rs) / (rt - rs))), 1.0))
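# The script below rebuilds the same profile step by step at module level and plots 1 - f_S.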
rs = 0.75
rt = 0.85
r = np.linspace(0.0, 1.0, 1000)
f = np.zeros(len(r))
idx = np.where(r < rs)
f[idx] = 0.0
idx = np.where(r < rt)
idx = np.where(r[idx] >= rs)
f[idx] = 0.5 * (1.0 - np.cos(np.pi * (r[idx] - rs) / (rt - rs)))
idx = np.where(r >= rt)
f[idx] = 1.0
plt.plot(r, 1.0 - f, linewidth=4.0)
plt.xlabel('Radius', fontsize=20)
plt.ylabel(r'$1 - f_S$', fontsize=20)
plt.xlim([0.0, 1.0])
plt.ylim([-0.05, 1.05])
plt.tick_params(labelsize=16)
plt.tight_layout()
plt.savefig('sponge.eps')
| [((13, 4, 13, 31), 'numpy.linspace', 'np.linspace', ({(13, 16, 13, 19): '0.0', (13, 21, 13, 24): '1.0', (13, 26, 13, 30): '1000'}, {}), '(0.0, 1.0, 1000)', True, 'import numpy as np\n'), ((16, 6, 16, 22), 'numpy.where', 'np.where', ({(16, 15, 16, 21): 'r < rs'}, {}), '(r < rs)', True, 'import numpy as np\n'), ((19, 6, 19, 22), 'numpy.where', 'np.where', ({(19, 15, 19, 21): 'r < rt'}, {}), '(r < rt)', True, 'import numpy as np\n'), ((20, 6, 20, 28), 'numpy.where', 'np.where', ({(20, 15, 20, 27): 'r[idx] >= rs'}, {}), '(r[idx] >= rs)', True, 'import numpy as np\n'), ((23, 6, 23, 23), 'numpy.where', 'np.where', ({(23, 15, 23, 22): 'r >= rt'}, {}), '(r >= rt)', True, 'import numpy as np\n'), ((26, 0, 26, 35), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((27, 0, 27, 33), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((28, 0, 28, 37), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((30, 0, 30, 20), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(30, 9, 30, 19): '[0.0, 1.0]'}, {}), '([0.0, 1.0])', True, 'import matplotlib.pyplot as plt\n'), ((31, 0, 31, 23), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(31, 9, 31, 22): '[-0.05, 1.05]'}, {}), '([-0.05, 1.05])', True, 'import matplotlib.pyplot as plt\n'), ((33, 0, 33, 29), 'matplotlib.pyplot.tick_params', 'plt.tick_params', (), '', True, 'import matplotlib.pyplot as plt\n'), ((35, 0, 35, 18), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((37, 0, 37, 25), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(37, 12, 37, 24): '"""sponge.eps"""'}, {}), "('sponge.eps')", True, 'import matplotlib.pyplot as plt\n'), ((21, 22, 21, 63), 'numpy.cos', 'np.cos', ({(21, 29, 21, 62): '(np.pi * (r[idx] - rs) / (rt - rs))'}, {}), '(np.pi * (r[idx] - rs) / (rt - rs))', True, 'import numpy as np\n')] |
JenFuChen/NKUST | Python/110-1/Midterm Additional HW/005.py | bd80a449eddfdaf75709379d2e904ff70d409666 | # 005 Print a diamond of asterisks
while(1):
level = int(input())
if(level <= 0):
break
L = 2*level-1
mid = int((L - 1) / 2)
inspa = mid * 2 - 1
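    # Each of the L rows prints a left star preceded by `spa` leading spaces; every row
    # except the first and last also prints a right star after an inner gap that widens
    # towards the middle row and narrows again below it.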
for i in range(L):
spa = level - i - 1
if spa >= 0:
print(" " * spa, end='')
print('*', end='')
if spa < 0:
spa = -spa
print(" " * spa, end='')
print('*', end='')
if(i > 0 and i <= mid):
for j in range(i*2-1):
print(" ", end='')
print('*', end='')
if(i > 0 and i > mid and i != L-1):
inspa = inspa - 2
for j in range(inspa):
print(" ", end='')
print('*', end='')
print()
| [] |
davisidarta/dynamo-release | dynamo/plot/pseudotime.py | 0dbd769f52ea07f3cdaa8fb31022ceb89938c382 | import numpy as np
from ..tools.utils import update_dict
from .utils import save_fig
def plot_direct_graph(adata,
layout=None,
figsize=[6, 4],
save_show_or_return='show',
save_kwargs={},
):
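    """Draw the directed graph stored in ``adata.uns["df_mat"]`` (a source/target/weight
    edge list) with networkx, scaling edge widths by edge weight; save or show the figure,
    or return the drawing, according to `save_show_or_return`."""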
df_mat = adata.uns["df_mat"]
import matplotlib.pyplot as plt
import networkx as nx
edge_color = "gray"
G = nx.from_pandas_edgelist(
df_mat,
source="source",
target="target",
edge_attr="weight",
create_using=nx.DiGraph(),
)
G.nodes()
W = []
for n, nbrs in G.adj.items():
for nbr, eattr in nbrs.items():
W.append(eattr["weight"])
options = {
"width": 300,
"arrowstyle": "-|>",
"arrowsize": 1000,
}
plt.figure(figsize=figsize)
if layout is None:
# pos : dictionary, optional
# A dictionary with nodes as keys and positions as values.
# If not specified a spring layout positioning will be computed.
# See :py:mod:`networkx.drawing.layout` for functions that
# compute node positions.
g = nx.draw(
G,
with_labels=True,
node_color="skyblue",
node_size=100,
edge_color=edge_color,
width=W / np.max(W) * 5,
edge_cmap=plt.cm.Blues,
options=options,
)
else:
raise Exception("layout", layout, " is not supported.")
if save_show_or_return == "save":
s_kwargs = {"path": None, "prefix": 'plot_direct_graph', "dpi": None,
"ext": 'pdf', "transparent": True, "close": True, "verbose": True}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return g
| [((38, 4, 38, 31), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((24, 21, 24, 33), 'networkx.DiGraph', 'nx.DiGraph', ({}, {}), '()', True, 'import networkx as nx\n'), ((66, 8, 66, 26), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((67, 8, 67, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((52, 22, 52, 31), 'numpy.max', 'np.max', ({(52, 29, 52, 30): 'W'}, {}), '(W)', True, 'import numpy as np\n')] |
joshualyguessennd/ocean.py | ocean_lib/web3_internal/utils.py | 23274698df4aae078d53b12d768c721af16f6e80 | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
from collections import namedtuple
import eth_account
import eth_keys
import eth_utils
from eth_keys import KeyAPI
from eth_utils import big_endian_to_int
from ocean_lib.web3_internal.web3_provider import Web3Provider
from web3 import Web3
Signature = namedtuple("Signature", ("v", "r", "s"))
logger = logging.getLogger(__name__)
def generate_multi_value_hash(types, values):
"""
Return the hash of the given list of values.
This is equivalent to packing and hashing values in a solidity smart contract
hence the use of `soliditySha3`.
:param types: list of solidity types expressed as strings
:param values: list of values matching the `types` list
:return: bytes
"""
assert len(types) == len(values)
return Web3.soliditySha3(types, values)
def prepare_prefixed_hash(msg_hash):
"""
:param msg_hash:
:return:
"""
return generate_multi_value_hash(
["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash]
)
def add_ethereum_prefix_and_hash_msg(text):
"""
This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover.
:param text: str any str to be signed / used in recovering address from a signature
:return: hash of prefixed text according to the recommended ethereum prefix
"""
prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}"
return Web3.sha3(text=prefixed_msg)
def get_public_key_from_address(web3, account):
"""
:param web3:
:param account:
:return:
"""
_hash = web3.sha3(text="verify signature.")
signature = web3.personal.sign(_hash, account.address, account.password)
signature = split_signature(web3, web3.toBytes(hexstr=signature))
signature_vrs = Signature(
signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s)
)
prefixed_hash = prepare_prefixed_hash(_hash)
pub_key = KeyAPI.PublicKey.recover_from_msg_hash(
prefixed_hash, KeyAPI.Signature(vrs=signature_vrs)
)
assert (
pub_key.to_checksum_address() == account.address
), "recovered address does not match signing address."
return pub_key
def to_32byte_hex(web3, val):
"""
:param web3:
:param val:
:return:
"""
return web3.toBytes(val).rjust(32, b"\0")
def split_signature(web3, signature):
"""
:param web3:
:param signature: signed message hash, hex str
:return:
"""
assert len(signature) == 65, (
f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}"
)
v = web3.toInt(signature[-1])
r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big"))
s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big"))
if v != 27 and v != 28:
v = 27 + v % 2
return Signature(v, r, s)
def get_wallet(index):
name = "PARITY_ADDRESS" if not index else f"PARITY_ADDRESS{index}"
pswrd_name = "PARITY_PASSWORD" if not index else f"PARITY_PASSWORD{index}"
key_name = "PARITY_KEY" if not index else f"PARITY_KEY{index}"
encrypted_key_name = (
"PARITY_ENCRYPTED_KEY" if not index else f"PARITY_ENCRYPTED_KEY{index}"
)
keyfile_name = "PARITY_KEYFILE" if not index else f"PARITY_KEYFILE{index}"
address = os.getenv(name)
if not address:
return None
pswrd = os.getenv(pswrd_name)
key = os.getenv(key_name)
encr_key = os.getenv(encrypted_key_name)
key_file = os.getenv(keyfile_name)
if key_file and not encr_key:
with open(key_file) as _file:
encr_key = json.loads(_file.read())
from ocean_lib.web3_internal.wallet import Wallet
return Wallet(
Web3Provider.get_web3(),
private_key=key,
encrypted_key=encr_key,
address=Web3.toChecksumAddress(address),
password=pswrd,
)
def privateKeyToAddress(private_key: str) -> str:
return eth_account.Account().privateKeyToAccount(private_key).address
def privateKeyToPublicKey(private_key: str):
private_key_bytes = eth_utils.decode_hex(private_key)
private_key_object = eth_keys.keys.PrivateKey(private_key_bytes)
return private_key_object.public_key
| [((16, 12, 16, 52), 'collections.namedtuple', 'namedtuple', ({(16, 23, 16, 34): '"""Signature"""', (16, 36, 16, 51): "('v', 'r', 's')"}, {}), "('Signature', ('v', 'r', 's'))", False, 'from collections import namedtuple\n'), ((18, 9, 18, 36), 'logging.getLogger', 'logging.getLogger', ({(18, 27, 18, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((32, 11, 32, 43), 'web3.Web3.soliditySha3', 'Web3.soliditySha3', ({(32, 29, 32, 34): 'types', (32, 36, 32, 42): 'values'}, {}), '(types, values)', False, 'from web3 import Web3\n'), ((54, 11, 54, 39), 'web3.Web3.sha3', 'Web3.sha3', (), '', False, 'from web3 import Web3\n'), ((118, 14, 118, 29), 'os.getenv', 'os.getenv', ({(118, 24, 118, 28): 'name'}, {}), '(name)', False, 'import os\n'), ((122, 12, 122, 33), 'os.getenv', 'os.getenv', ({(122, 22, 122, 32): 'pswrd_name'}, {}), '(pswrd_name)', False, 'import os\n'), ((123, 10, 123, 29), 'os.getenv', 'os.getenv', ({(123, 20, 123, 28): 'key_name'}, {}), '(key_name)', False, 'import os\n'), ((124, 15, 124, 44), 'os.getenv', 'os.getenv', ({(124, 25, 124, 43): 'encrypted_key_name'}, {}), '(encrypted_key_name)', False, 'import os\n'), ((125, 15, 125, 38), 'os.getenv', 'os.getenv', ({(125, 25, 125, 37): 'keyfile_name'}, {}), '(keyfile_name)', False, 'import os\n'), ((146, 24, 146, 57), 'eth_utils.decode_hex', 'eth_utils.decode_hex', ({(146, 45, 146, 56): 'private_key'}, {}), '(private_key)', False, 'import eth_utils\n'), ((147, 25, 147, 68), 'eth_keys.keys.PrivateKey', 'eth_keys.keys.PrivateKey', ({(147, 50, 147, 67): 'private_key_bytes'}, {}), '(private_key_bytes)', False, 'import eth_keys\n'), ((68, 26, 68, 56), 'eth_utils.big_endian_to_int', 'big_endian_to_int', ({(68, 44, 68, 55): 'signature.r'}, {}), '(signature.r)', False, 'from eth_utils import big_endian_to_int\n'), ((68, 58, 68, 88), 'eth_utils.big_endian_to_int', 'big_endian_to_int', ({(68, 76, 68, 87): 'signature.s'}, {}), '(signature.s)', False, 'from eth_utils import big_endian_to_int\n'), ((72, 23, 72, 58), 'eth_keys.KeyAPI.Signature', 'KeyAPI.Signature', (), '', False, 'from eth_keys import KeyAPI\n'), ((133, 8, 133, 31), 'ocean_lib.web3_internal.web3_provider.Web3Provider.get_web3', 'Web3Provider.get_web3', ({}, {}), '()', False, 'from ocean_lib.web3_internal.web3_provider import Web3Provider\n'), ((136, 16, 136, 47), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(136, 39, 136, 46): 'address'}, {}), '(address)', False, 'from web3 import Web3\n'), ((142, 11, 142, 32), 'eth_account.Account', 'eth_account.Account', ({}, {}), '()', False, 'import eth_account\n')] |
JimmyLamothe/autofront | autofront/__init__.py | d179e54411f5d53046a5fa52b4430e09b01ebaca | import autofront.autofront as autofront
import autofront.utilities as utilities
initialize = autofront.initialize
add = autofront.add
run = autofront.run
get_display = utilities.get_display
| [] |
ketsonroberto/PBDO | src/main.py | cdc1c5275bc17753be5c06a216f92391b6f1f1ab | # THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE.
import matplotlib.pyplot as plt
import numpy as np
from StochasticMechanics import Stochastic
from scipy.optimize import minimize
from Performance import PerformanceOpt
from Hazards import Stationary
from Building import *
from BuildingProperties import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import optimize
freq = np.linspace(0.00001, 20, 500)
gamma = np.ones((ndof)) * [0.5]
nu = np.ones((ndof)) * [0.5]
alpha = np.ones((ndof)) * [1]
m = np.ones((ndof)) * [1]
c = np.ones((ndof)) * [1]
k = np.ones((ndof)) * [200]
a = np.ones((ndof)) * [0.8] #0.01
ksi = np.ones((ndof)) * [0.05]
# ksi = [0.05, 0.05]
im_max = 30
B_max = 1
# S1 = np.ones(ndof)
# Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof)
# power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1)
# Von Karman
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, U = Ps.power_spectrum_excitation(u10=6.2371, freq=freq, z=z)
# plt.semilogy(freq/(2*np.pi), power_spectrum[:,0])
# plt.show()
# columns["area"] = 0.001
# columns.update({"area": 0.001})
ks = []
ms = []
msf = []
#cost = []
nlc = 100
lc = np.linspace(0.05, 2, nlc)
# fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# fig.suptitle('Mass and Stiffness')
# ax1.plot(lc,ms)
# ax1.plot(lc,msf)
# ax2.plot(lc,ks)
# ax3.plot(ks,cost)
# plt.show()
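# Assemble per-storey stiffness and mass from the building model; the top storey
# uses the roof (top-storey) mass.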
columns = update_columns(columns=columns, lx=0.4, ly=0.4)
Building = Structure(building, columns, slabs, core, concrete, steel)
k_story = Building.stiffness_story()
m_story = Building.mass_storey(top_story=False)
m_story_f = Building.mass_storey(top_story=True)
k = np.ones(ndof) * [k_story]
m = np.ones(ndof) * [m_story]
m[-1] = m_story_f
length = 0.3
size_col = np.ones(ndof) * [length]
Sto = Stochastic(power_spectrum=power_spectrum, model='bouc_wen', ndof=ndof, freq=freq)
#Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100,
# design_life=1) # design_life = 50
# total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
#CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
# steel=steel, cost=cost)
#size_col = np.ones(ndof) * [0.5]
#size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
#size_col = np.array([0.1, 0.2, 0.3])
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
sizea = 0.1
sizeb = 1
wa = 0.1
wb=100
npar = 10
nw = 10
X = np.zeros((npar * nw, 3 * ndof + 1))
y = np.zeros((npar * nw, 2 * ndof))
ct=0
ct1=0
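# Build training data for a Gaussian-process surrogate: sample random column sizes and
# wind speeds, run the statistical linearization for each sample, and store the
# (mass, damping, stiffness, wind) inputs in X with the displacement/velocity variances in y.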
for kk in range(npar):
size_col = sizea+(sizeb-sizea)*np.random.rand(ndof)
M, C, K, m, c, k = Sto.get_MCK(size_col=size_col, args=args, columns=columns)
for i in range(nw):
im = wa + (wb - wa) * np.random.rand(1)[0]
idd = 0
for j in np.arange(0, 3 * ndof, 3):
X[ct, j] = m[idd]
X[ct, j + 1] = c[idd]
X[ct, j + 2] = k[idd]
idd = idd + 1
X[ct, -1] = im
ct = ct + 1
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z)
Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, power_sp=power_spectrum, tol=0.01, maxiter=100,
gamma=gamma, nu=nu, alpha=alpha, a=a)
idd = 0
for j in np.arange(0, 2 * ndof, 2):
y[ct1, j] = Var[idd][0]
y[ct1, j + 1] = Vard[idd][0]
idd = idd + 1
ct1 = ct1 + 1
print(np.shape(y))
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels_U = [None,
ConstantKernel(1.0, (1e-4, 1e4)) * RBF(1, (1e-4, 1e4)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=1,
length_scale_bounds=(1.0e-5, 100.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, nu=1.5)]
gp = GaussianProcessRegressor(kernel=kernels_U[0], n_restarts_optimizer=10, normalize_y=False)
gp.fit(X, y)
r2 = gp.score(X, y)
print(r2)
yp = gp.predict(np.array(X[2].reshape(1, -1)))
val = X[2]
val[-1]=100.0
print(val)
yp = gp.predict(val.reshape(1, -1))
print(yp)
#print(np.shape(X))
#print(np.shape(y))
#nn_architecture = [
# {"input_dim": 10, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 6, "activation": "relu"},
#]
#from neural import NeuralNets
#from sklearn.model_selection import train_test_split
#NN = NeuralNets(nn_architecture)
#TEST_SIZE = 0.1
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132)
##print(X_train)
#params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000,
# learning_rate=1, verbose=True)
"""
b0 = np.linspace(0.1, 0.5, 20)
cost_f = []
cost_i = []
cost_t = []
mm = []
pp = []
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
for i in range(len(b0)):
Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"],
dry_wall_area=dry_wall_area)
Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127)
scol = np.array([b0[i], b0[i]])
Ct = Opt.objective_function(size_col=scol, args=args)
#mom, phi = Building.compression(col_size=b0[i], L=columns["height"])
cost_f.append(Cf)
cost_i.append(Ci)
cost_t.append(Ct)
fig = plt.figure()
plt.plot(b0, cost_t,'-o')
plt.show()
#fig = plt.figure()
#plt.plot(phi, mom,'-o')
#plt.show()
"""
"""
b0 = np.linspace(0.05,0.5,5)
b1 = np.linspace(0.05,0.5,5)
B0, B1 = np.meshgrid(b0, b1)
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
tc = np.zeros((5, 5))
for i in range(len(b0)):
print(i)
for j in range(len(b1)):
size_col = np.array([b0[i], b1[j]])
resp = Opt.objective_function(size_col=size_col, args=args)
tc[i,j] = resp
Z = tc.reshape(B0.shape)
Z = np.array(Z)
nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape)
print([B0[nd], B1[nd]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
#size_col = np.ones(ndof) * [0.2]
#args=[ksi, im_max, B_max, gamma, nu, alpha, a]
##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a}
#bnds = []
#for i in range(ndof):
# bnds.append((0.1, 1))
#bnds=tuple(bnds)
###from scipy import optimize
###res = optimize.fmin(Opt.objective_function, x0=size_col)
#res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds)
###from scipy.optimize import basinhopping
###minimizer_kwargs = {"method": "BFGS", "args": args}
###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200)
#print(res)
### Global methods.
###from scipy.optimize import rosen, shgo
###from scipy.optimize import dual_annealing
###ret = dual_annealing(Opt.objective_function, bounds=bnds)
###print((ret.x, ret.fun))
#c = Opt.linear_damping(m=m, k=k, ksi=ksi)
#M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
#financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max,
# B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
| [((15, 7, 15, 36), 'numpy.linspace', 'np.linspace', ({(15, 19, 15, 26): '1e-05', (15, 28, 15, 30): '20', (15, 32, 15, 35): '500'}, {}), '(1e-05, 20, 500)', True, 'import numpy as np\n'), ((36, 5, 36, 59), 'Hazards.Stationary', 'Stationary', (), '', False, 'from Hazards import Stationary\n'), ((50, 5, 50, 30), 'numpy.linspace', 'np.linspace', ({(50, 17, 50, 21): '0.05', (50, 23, 50, 24): '2', (50, 26, 50, 29): 'nlc'}, {}), '(0.05, 2, nlc)', True, 'import numpy as np\n'), ((74, 6, 74, 87), 'StochasticMechanics.Stochastic', 'Stochastic', (), '', False, 'from StochasticMechanics import Stochastic\n'), ((97, 4, 97, 39), 'numpy.zeros', 'np.zeros', ({(97, 13, 97, 38): '(npar * nw, 3 * ndof + 1)'}, {}), '((npar * nw, 3 * ndof + 1))', True, 'import numpy as np\n'), ((98, 4, 98, 35), 'numpy.zeros', 'np.zeros', ({(98, 13, 98, 34): '(npar * nw, 2 * ndof)'}, {}), '((npar * nw, 2 * ndof))', True, 'import numpy as np\n'), ((151, 5, 151, 94), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (), '', False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((17, 8, 17, 23), 'numpy.ones', 'np.ones', ({(17, 17, 17, 21): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((18, 5, 18, 20), 'numpy.ones', 'np.ones', ({(18, 14, 18, 18): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((19, 8, 19, 23), 'numpy.ones', 'np.ones', ({(19, 17, 19, 21): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((21, 4, 21, 19), 'numpy.ones', 'np.ones', ({(21, 13, 21, 17): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((22, 4, 22, 19), 'numpy.ones', 'np.ones', ({(22, 13, 22, 17): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((23, 4, 23, 19), 'numpy.ones', 'np.ones', ({(23, 13, 23, 17): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((24, 4, 24, 19), 'numpy.ones', 'np.ones', ({(24, 13, 24, 17): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((25, 6, 25, 21), 'numpy.ones', 'np.ones', ({(25, 15, 25, 19): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((67, 4, 67, 17), 'numpy.ones', 'np.ones', ({(67, 12, 67, 16): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((68, 4, 68, 17), 'numpy.ones', 'np.ones', ({(68, 12, 68, 16): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((72, 11, 72, 24), 'numpy.ones', 'np.ones', ({(72, 19, 72, 23): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((134, 6, 134, 17), 'numpy.shape', 'np.shape', ({(134, 15, 134, 16): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((110, 17, 110, 42), 'numpy.arange', 'np.arange', ({(110, 27, 110, 28): '(0)', (110, 30, 110, 38): '(3 * ndof)', (110, 40, 110, 41): '(3)'}, {}), '(0, 3 * ndof, 3)', True, 'import numpy as np\n'), ((119, 13, 119, 67), 'Hazards.Stationary', 'Stationary', (), '', False, 'from Hazards import Stationary\n'), ((126, 17, 126, 42), 'numpy.arange', 'np.arange', ({(126, 27, 126, 28): '(0)', (126, 30, 126, 38): '(2 * ndof)', (126, 40, 126, 41): '(2)'}, {}), '(0, 2 * ndof, 2)', True, 'import numpy as np\n'), ((142, 13, 142, 45), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', ({(142, 28, 142, 31): '(1.0)', (142, 33, 142, 44): '(0.0001, 10000.0)'}, {}), '(1.0, (0.0001, 10000.0))', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((142, 48, 142, 67), 'sklearn.gaussian_process.kernels.RBF', 'RBF', ({(142, 52, 142, 53): '(1)', (142, 55, 142, 66): '(0.0001, 10000.0)'}, {}), '(1, (0.0001, 10000.0))', False, 'from 
sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((143, 17, 143, 63), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'RationalQuadratic', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((144, 17, 146, 63), 'sklearn.gaussian_process.kernels.ExpSineSquared', 'ExpSineSquared', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((147, 11, 147, 44), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', ({(147, 26, 147, 29): '(0.1)', (147, 31, 147, 43): '(0.01, 10.0)'}, {}), '(0.1, (0.01, 10.0))', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((149, 17, 149, 49), 'sklearn.gaussian_process.kernels.Matern', 'Matern', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((102, 35, 102, 55), 'numpy.random.rand', 'np.random.rand', ({(102, 50, 102, 54): 'ndof'}, {}), '(ndof)', True, 'import numpy as np\n'), ((148, 14, 148, 65), 'sklearn.gaussian_process.kernels.DotProduct', 'DotProduct', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel\n'), ((107, 30, 107, 47), 'numpy.random.rand', 'np.random.rand', ({(107, 45, 107, 46): '(1)'}, {}), '(1)', True, 'import numpy as np\n')] |
erelcan/categorical-embedder | categorical_embedder/embedders/core/aux/custom_object_handler.py | 376b8779500af2aa459c879f8e525f2ef25d6b31 | from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
def prepare_custom_objects(custom_object_info):
custom_objects = {}
custom_objects.update(_prepare_custom_layers(custom_object_info["layer_info"]))
if not custom_object_info["has_implicit_loss"]:
custom_objects.update(_prepare_custom_loss(custom_object_info["loss_info"]))
return custom_objects
def _prepare_custom_layers(layer_info):
custom_layers = {}
for layer_name in layer_info:
custom_layers[layer_name] = get_custom_layer_class(layer_name)
return custom_layers
def _prepare_custom_loss(loss_info):
return {"loss": get_loss_function(loss_info)}
| [((16, 36, 16, 70), 'categorical_embedder.embedders.core.aux.custom_layers.get_custom_layer_class', 'get_custom_layer_class', ({(16, 59, 16, 69): 'layer_name'}, {}), '(layer_name)', False, 'from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class\n'), ((21, 20, 21, 48), 'categorical_embedder.embedders.core.aux.loss_factory.get_loss_function', 'get_loss_function', ({(21, 38, 21, 47): 'loss_info'}, {}), '(loss_info)', False, 'from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function\n')] |
charliebr30/osprofiler | osprofiler/cmd/shell.py | cffca4e29e373e3f09f2ffdd458761183a851569 | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Profiler.
"""
import argparse
import inspect
import sys
from oslo_config import cfg
import osprofiler
from osprofiler.cmd import cliutils
from osprofiler.cmd import commands
from osprofiler import exc
from osprofiler import opts
class OSProfilerShell(object):
def __init__(self, argv):
args = self._get_base_parser().parse_args(argv)
opts.set_defaults(cfg.CONF)
if not (args.os_auth_token and args.ceilometer_url):
if not args.os_username:
raise exc.CommandError(
"You must provide a username via either --os-username or "
"via env[OS_USERNAME]")
if not args.os_password:
raise exc.CommandError(
"You must provide a password via either --os-password or "
"via env[OS_PASSWORD]")
if self._no_project_and_domain_set(args):
# steer users towards Keystone V3 API
raise exc.CommandError(
"You must provide a project_id via either --os-project-id "
"or via env[OS_PROJECT_ID] and a domain_name via either "
"--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or "
"a domain_id via either --os-user-domain-id or via "
"env[OS_USER_DOMAIN_ID]")
if not args.os_auth_url:
raise exc.CommandError(
"You must provide an auth url via either --os-auth-url or "
"via env[OS_AUTH_URL]")
args.func(args)
def _get_base_parser(self):
parser = argparse.ArgumentParser(
prog="osprofiler",
description=__doc__.strip(),
add_help=True
)
parser.add_argument("-v", "--version",
action="version",
version=osprofiler.__version__)
self._append_ceilometer_args(parser)
self._append_identity_args(parser)
self._append_subcommands(parser)
return parser
def _append_ceilometer_args(self, parent_parser):
parser = parent_parser.add_argument_group("ceilometer")
parser.add_argument(
"--ceilometer-url", default=cliutils.env("CEILOMETER_URL"),
help="Defaults to env[CEILOMETER_URL].")
parser.add_argument(
"--ceilometer-api-version",
default=cliutils.env("CEILOMETER_API_VERSION", default="2"),
help="Defaults to env[CEILOMETER_API_VERSION] or 2.")
def _append_identity_args(self, parent_parser):
# FIXME(fabgia): identity related parameters should be passed by the
# Keystone client itself to avoid constant update in all the services
# clients. When this fix is merged this method can be made obsolete.
# Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337
parser = parent_parser.add_argument_group("identity")
parser.add_argument("-k", "--insecure",
default=False,
action="store_true",
help="Explicitly allow osprofiler to "
"perform \"insecure\" SSL (https) requests. "
"The server's certificate will "
"not be verified against any certificate "
"authorities. This option should be used with "
"caution.")
# User related options
parser.add_argument("--os-username",
default=cliutils.env("OS_USERNAME"),
help="Defaults to env[OS_USERNAME].")
parser.add_argument("--os-user-id",
default=cliutils.env("OS_USER_ID"),
help="Defaults to env[OS_USER_ID].")
parser.add_argument("--os-password",
default=cliutils.env("OS_PASSWORD"),
help="Defaults to env[OS_PASSWORD].")
# Domain related options
parser.add_argument("--os-user-domain-id",
default=cliutils.env("OS_USER_DOMAIN_ID"),
help="Defaults to env[OS_USER_DOMAIN_ID].")
parser.add_argument("--os-user-domain-name",
default=cliutils.env("OS_USER_DOMAIN_NAME"),
help="Defaults to env[OS_USER_DOMAIN_NAME].")
parser.add_argument("--os-project-domain-id",
default=cliutils.env("OS_PROJECT_DOMAIN_ID"),
help="Defaults to env[OS_PROJECT_DOMAIN_ID].")
parser.add_argument("--os-project-domain-name",
default=cliutils.env("OS_PROJECT_DOMAIN_NAME"),
help="Defaults to env[OS_PROJECT_DOMAIN_NAME].")
# Project V3 or Tenant V2 related options
parser.add_argument("--os-project-id",
default=cliutils.env("OS_PROJECT_ID"),
help="Another way to specify tenant ID. "
"This option is mutually exclusive with "
" --os-tenant-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-project-name",
default=cliutils.env("OS_PROJECT_NAME"),
help="Another way to specify tenant name. "
"This option is mutually exclusive with "
" --os-tenant-name. "
"Defaults to env[OS_PROJECT_NAME].")
parser.add_argument("--os-tenant-id",
default=cliutils.env("OS_TENANT_ID"),
help="This option is mutually exclusive with "
" --os-project-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-tenant-name",
default=cliutils.env("OS_TENANT_NAME"),
help="Defaults to env[OS_TENANT_NAME].")
# Auth related options
parser.add_argument("--os-auth-url",
default=cliutils.env("OS_AUTH_URL"),
help="Defaults to env[OS_AUTH_URL].")
parser.add_argument("--os-auth-token",
default=cliutils.env("OS_AUTH_TOKEN"),
help="Defaults to env[OS_AUTH_TOKEN].")
parser.add_argument("--os-cacert",
metavar="<ca-certificate-file>",
dest="os_cacert",
default=cliutils.env("OS_CACERT"),
help="Path of CA TLS certificate(s) used to verify"
" the remote server\"s certificate. Without this "
"option ceilometer looks for the default system CA"
" certificates.")
parser.add_argument("--os-cert",
help="Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key.")
parser.add_argument("--os-key",
help="Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your cert file.")
# Service Catalog related options
parser.add_argument("--os-service-type",
default=cliutils.env("OS_SERVICE_TYPE"),
help="Defaults to env[OS_SERVICE_TYPE].")
parser.add_argument("--os-endpoint-type",
default=cliutils.env("OS_ENDPOINT_TYPE"),
help="Defaults to env[OS_ENDPOINT_TYPE].")
parser.add_argument("--os-region-name",
default=cliutils.env("OS_REGION_NAME"),
help="Defaults to env[OS_REGION_NAME].")
def _append_subcommands(self, parent_parser):
subcommands = parent_parser.add_subparsers(help="<subcommands>")
for group_cls in commands.BaseCommand.__subclasses__():
group_parser = subcommands.add_parser(group_cls.group_name)
subcommand_parser = group_parser.add_subparsers()
for name, callback in inspect.getmembers(
group_cls(), predicate=inspect.ismethod):
command = name.replace("_", "-")
desc = callback.__doc__ or ""
help_message = desc.strip().split("\n")[0]
arguments = getattr(callback, "arguments", [])
command_parser = subcommand_parser.add_parser(
command, help=help_message, description=desc)
for (args, kwargs) in arguments:
command_parser.add_argument(*args, **kwargs)
command_parser.set_defaults(func=callback)
def _no_project_and_domain_set(self, args):
if not (args.os_project_id or (args.os_project_name and
(args.os_user_domain_name or args.os_user_domain_id)) or
(args.os_tenant_id or args.os_tenant_name)):
return True
else:
return False
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
OSProfilerShell(args)
except exc.CommandError as e:
print(e.message)
return 1
if __name__ == "__main__":
main()
| [((38, 8, 38, 35), 'osprofiler.opts.set_defaults', 'opts.set_defaults', ({(38, 26, 38, 34): 'cfg.CONF'}, {}), '(cfg.CONF)', False, 'from osprofiler import opts\n'), ((208, 25, 208, 62), 'osprofiler.cmd.commands.BaseCommand.__subclasses__', 'commands.BaseCommand.__subclasses__', ({}, {}), '()', False, 'from osprofiler.cmd import commands\n'), ((42, 22, 44, 43), 'osprofiler.exc.CommandError', 'exc.CommandError', ({(43, 20, 44, 42): '"""You must provide a username via either --os-username or via env[OS_USERNAME]"""'}, {}), "(\n 'You must provide a username via either --os-username or via env[OS_USERNAME]'\n )", False, 'from osprofiler import exc\n'), ((47, 22, 49, 43), 'osprofiler.exc.CommandError', 'exc.CommandError', ({(48, 20, 49, 42): '"""You must provide a password via either --os-password or via env[OS_PASSWORD]"""'}, {}), "(\n 'You must provide a password via either --os-password or via env[OS_PASSWORD]'\n )", False, 'from osprofiler import exc\n'), ((53, 22, 58, 45), 'osprofiler.exc.CommandError', 'exc.CommandError', ({(54, 20, 58, 44): '"""You must provide a project_id via either --os-project-id or via env[OS_PROJECT_ID] and a domain_name via either --os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or a domain_id via either --os-user-domain-id or via env[OS_USER_DOMAIN_ID]"""'}, {}), "(\n 'You must provide a project_id via either --os-project-id or via env[OS_PROJECT_ID] and a domain_name via either --os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or a domain_id via either --os-user-domain-id or via env[OS_USER_DOMAIN_ID]'\n )", False, 'from osprofiler import exc\n'), ((61, 22, 63, 43), 'osprofiler.exc.CommandError', 'exc.CommandError', ({(62, 20, 63, 42): '"""You must provide an auth url via either --os-auth-url or via env[OS_AUTH_URL]"""'}, {}), "(\n 'You must provide an auth url via either --os-auth-url or via env[OS_AUTH_URL]'\n )", False, 'from osprofiler import exc\n'), ((87, 40, 87, 70), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(87, 53, 87, 69): '"""CEILOMETER_URL"""'}, {}), "('CEILOMETER_URL')", False, 'from osprofiler.cmd import cliutils\n'), ((91, 20, 91, 71), 'osprofiler.cmd.cliutils.env', 'cliutils.env', (), '', False, 'from osprofiler.cmd import cliutils\n'), ((112, 36, 112, 63), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(112, 49, 112, 62): '"""OS_USERNAME"""'}, {}), "('OS_USERNAME')", False, 'from osprofiler.cmd import cliutils\n'), ((116, 36, 116, 62), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(116, 49, 116, 61): '"""OS_USER_ID"""'}, {}), "('OS_USER_ID')", False, 'from osprofiler.cmd import cliutils\n'), ((120, 36, 120, 63), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(120, 49, 120, 62): '"""OS_PASSWORD"""'}, {}), "('OS_PASSWORD')", False, 'from osprofiler.cmd import cliutils\n'), ((125, 36, 125, 69), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(125, 49, 125, 68): '"""OS_USER_DOMAIN_ID"""'}, {}), "('OS_USER_DOMAIN_ID')", False, 'from osprofiler.cmd import cliutils\n'), ((129, 36, 129, 71), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(129, 49, 129, 70): '"""OS_USER_DOMAIN_NAME"""'}, {}), "('OS_USER_DOMAIN_NAME')", False, 'from osprofiler.cmd import cliutils\n'), ((133, 36, 133, 72), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(133, 49, 133, 71): '"""OS_PROJECT_DOMAIN_ID"""'}, {}), "('OS_PROJECT_DOMAIN_ID')", False, 'from osprofiler.cmd import cliutils\n'), ((137, 36, 137, 74), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(137, 49, 137, 73): '"""OS_PROJECT_DOMAIN_NAME"""'}, {}), "('OS_PROJECT_DOMAIN_NAME')", False, 
'from osprofiler.cmd import cliutils\n'), ((142, 36, 142, 65), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(142, 49, 142, 64): '"""OS_PROJECT_ID"""'}, {}), "('OS_PROJECT_ID')", False, 'from osprofiler.cmd import cliutils\n'), ((149, 36, 149, 67), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(149, 49, 149, 66): '"""OS_PROJECT_NAME"""'}, {}), "('OS_PROJECT_NAME')", False, 'from osprofiler.cmd import cliutils\n'), ((156, 36, 156, 64), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(156, 49, 156, 63): '"""OS_TENANT_ID"""'}, {}), "('OS_TENANT_ID')", False, 'from osprofiler.cmd import cliutils\n'), ((162, 36, 162, 66), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(162, 49, 162, 65): '"""OS_TENANT_NAME"""'}, {}), "('OS_TENANT_NAME')", False, 'from osprofiler.cmd import cliutils\n'), ((167, 36, 167, 63), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(167, 49, 167, 62): '"""OS_AUTH_URL"""'}, {}), "('OS_AUTH_URL')", False, 'from osprofiler.cmd import cliutils\n'), ((171, 36, 171, 65), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(171, 49, 171, 64): '"""OS_AUTH_TOKEN"""'}, {}), "('OS_AUTH_TOKEN')", False, 'from osprofiler.cmd import cliutils\n'), ((177, 36, 177, 61), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(177, 49, 177, 60): '"""OS_CACERT"""'}, {}), "('OS_CACERT')", False, 'from osprofiler.cmd import cliutils\n'), ((195, 36, 195, 67), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(195, 49, 195, 66): '"""OS_SERVICE_TYPE"""'}, {}), "('OS_SERVICE_TYPE')", False, 'from osprofiler.cmd import cliutils\n'), ((199, 36, 199, 68), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(199, 49, 199, 67): '"""OS_ENDPOINT_TYPE"""'}, {}), "('OS_ENDPOINT_TYPE')", False, 'from osprofiler.cmd import cliutils\n'), ((203, 36, 203, 66), 'osprofiler.cmd.cliutils.env', 'cliutils.env', ({(203, 49, 203, 65): '"""OS_REGION_NAME"""'}, {}), "('OS_REGION_NAME')", False, 'from osprofiler.cmd import cliutils\n')] |
patrickkwang/bmt-lite | bmt/util.py | bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b | """Utilities."""
from functools import wraps
import re
from typing import Callable, List, Optional, TypeVar, Union
from .data import (
all_classes, all_slots,
)
def pascal_to_snake(s: str, sep: str = "_") -> str:
"""Convert Pascal case to snake case.
Assumes that
a) all words are either all-lowercase or all-uppercase
b) all 1-letter words are lowercase
c) there are no adjacent 1-letter words
d) there are no adjacent uppercase words
Examples:
PhenotypicFeature -> phenotypic_feature
RNAProduct -> RNA_product
FeedACamel -> feed_a_camel
Optionally specify `sep` (default "_").
"""
# add an underscore before each capital letter
underscored = re.sub(
r"(?<!^)(?=[A-Z])",
sep,
s,
)
# collapse any adjacent one-letter words
collapsed = re.sub(
r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+",
lambda match: match.group(0).replace("_", ""),
underscored,
)
# lower-case any words containing only one uppercase letter
lowercased = re.sub(
r"(?<![A-Z])[A-Z](?![A-Z])",
lambda match: match.group(0).lower(),
collapsed,
)
return lowercased
def snake_to_pascal(s: str, sep: str = "_") -> str:
"""Convert snake case to Pascal case.
This is the inverse of pascal_to_snake() when its assumptions
are true.
Optionally specify `sep` (default "_").
"""
return re.sub(
fr"(?:^|{sep})([a-zA-Z])",
lambda match: match.group(1).upper(),
s
)
def guess_casing(s: str) -> str:
"""Guess snake case or Pascal case."""
if "_" in s:
return "snake"
if any(c.isupper() for c in s):
return "pascal"
return "snake"
def normalize(s: str) -> str:
"""Normalize string input."""
if s.startswith("biolink:"):
s = s[8:]
if "_" in s:
# it's snake case
return s.replace("_", " ")
if " " in s:
return s
return pascal_to_snake(s, " ")
T = TypeVar("T")
def listify(func: Callable) -> Callable:
"""Expand function to take list of arguments."""
@wraps(func)
def wrapper(arg: Union[T, List[T]], **kwargs) -> Union[T, List[T]]:
"""Apply function to each element in list."""
if isinstance(arg, list):
return [
func(el, **kwargs)
for el in arg
]
else:
return func(arg, **kwargs)
return wrapper
@listify
def format(s: str, case: Optional[str] = None, **kwargs) -> str:
"""Format space-case string as biolink CURIE."""
if isinstance(case, str) and case.lower() == "pascal":
return "biolink:" + snake_to_pascal(s, " ")
elif isinstance(case, str) and case.lower() == "snake":
return "biolink:" + s.replace(" ", "_")
else:
return "biolink:" + s
def with_formatting():
"""Add format conversions to method."""
def decorator(func: Callable) -> Callable:
"""Generate decorator."""
@wraps(func)
def wrapper(self, s: str, *args, formatted=False, **kwargs):
"""Wrap in format conversions."""
case = guess_casing(s)
normalized = normalize(s)
output: Union[str, List[str]] = func(self, normalized, *args, **kwargs)
if formatted:
if normalized in all_classes:
output = format(output, case="pascal")
elif normalized in all_slots:
output = format(output, case="snake")
else:
output = format(output, case=case)
return output
return wrapper
return decorator
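# Minimal self-check sketch: exercises the casing helpers defined above when the
# module is run as a script (e.g. `python -m bmt.util`). The strings are
# illustrative and follow the docstring examples.
if __name__ == "__main__":
    assert pascal_to_snake("PhenotypicFeature") == "phenotypic_feature"
    assert pascal_to_snake("RNAProduct") == "RNA_product"
    assert snake_to_pascal("phenotypic_feature") == "PhenotypicFeature"
    assert guess_casing("RNAProduct") == "pascal"
    assert format("phenotypic feature", case="pascal") == "biolink:PhenotypicFeature"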
| [((84, 4, 84, 16), 'typing.TypeVar', 'TypeVar', ({(84, 12, 84, 15): '"""T"""'}, {}), "('T')", False, 'from typing import Callable, List, Optional, TypeVar, Union\n'), ((28, 18, 32, 5), 're.sub', 're.sub', ({(29, 8, 29, 26): '"""(?<!^)(?=[A-Z])"""', (30, 8, 30, 11): 'sep', (31, 8, 31, 9): 's'}, {}), "('(?<!^)(?=[A-Z])', sep, s)", False, 'import re\n'), ((89, 5, 89, 16), 'functools.wraps', 'wraps', ({(89, 11, 89, 15): 'func'}, {}), '(func)', False, 'from functools import wraps\n'), ((117, 9, 117, 20), 'functools.wraps', 'wraps', ({(117, 15, 117, 19): 'func'}, {}), '(func)', False, 'from functools import wraps\n')] |
jlevitt/py-to-json | src/py_to_json/__init__.py | 26bb68926f5ada601e965f42980e438c9718be73 | #
# OMNIVORE CONFIDENTIAL
# __________________
#
# [2013] - [2019] Omnivore Technologies
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Omnivore Technologies and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Omnivore Technologies
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Omnivore Technologies.
#
| [] |
brettkoonce/sktime | sktime/utils/time_series.py | 6336247bad0dac8692aa4b911c267f401dea4163 | __author__ = ["Markus Löning"]
__all__ = [
"compute_relative_to_n_timepoints",
"time_series_slope",
"fit_trend",
"remove_trend",
"add_trend"
]
import numpy as np
from sklearn.utils import check_array
from sktime.utils.validation.forecasting import check_time_index
def compute_relative_to_n_timepoints(n_timepoints, n="sqrt"):
"""
Get number of intervals from number of time points for various allowed
input arguments.
Helpful to compute number of intervals relative to time series length,
e.g. using floats or functions.
Parameters
----------
n_timepoints : int
n : {int, float, str, callable}
Returns
-------
n_intervals_ : int
Computed number of intervals
"""
# check input: n_timepoints
if not np.issubdtype(type(n_timepoints), np.dtype(int).type):
raise ValueError(
f"`n_timepoints` must be an integer, but found: "
f"{type(n_timepoints)}")
if not n_timepoints >= 1:
raise ValueError(
f"`n_timepoints` must be >= 1, but found: {n_timepoints}")
# compute number of splits
allowed_strings = ["sqrt", "log"]
# integer
if np.issubdtype(type(n), np.dtype(int).type):
if not n <= n_timepoints:
raise ValueError(
f"If `n_intervals` is an integer, it must be smaller "
f"than `n_timepoints`, but found: `n_intervals`={n} "
f"and `n_timepoints`={n_timepoints}")
if n < 1:
raise ValueError(f"If `n_intervals` is an integer, "
f"`n_intervals` must be >= 1, but found: {n}")
n_intervals_ = n
# function
elif callable(n):
n_intervals_ = n(n_timepoints)
# string
elif isinstance(n, str):
if n not in allowed_strings:
raise ValueError(
f"If `n_intervals` is a string, `n_intervals` must be "
f"in {allowed_strings}, but found: {n}")
str_func_map = {
"sqrt": np.sqrt,
"log": np.log
}
func = str_func_map[n]
n_intervals_ = func(n_timepoints)
# float
elif isinstance(n, float):
if not (0 < n <= 1):
raise ValueError(
f"If `n_intervals` is a float, `n_intervals` must be > 0 "
f"and <= 1, but found: {n}")
n_intervals_ = n * n_timepoints
else:
raise ValueError(
f"`n_intervals` must be either one of the allowed string options "
f"in "
f"{allowed_strings}, an integer or a float number.")
# make sure n_intervals is an integer and there is at least one interval
n_intervals_ = np.maximum(1, np.int(n_intervals_))
return n_intervals_
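# Examples (illustrative): `n` may be a string, a float fraction of the series
# length, or an absolute integer.
#
#   >>> compute_relative_to_n_timepoints(100, n="sqrt")
#   10
#   >>> compute_relative_to_n_timepoints(100, n=0.25)
#   25
#   >>> compute_relative_to_n_timepoints(100, n=16)
#   16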
def time_series_slope(y):
"""
Compute slope of time series (y) using ordinary least squares.
Parameters
----------
y : array_like
Time-series.
Returns
-------
slope : float
Slope of time-series.
"""
y = np.asarray(y).ravel()
len_series = len(y)
if len_series < 2:
return 0
else:
x = np.arange(len_series) # time index
x_mean = (len_series - 1) / 2 # faster than x.mean()
return (np.mean(x * y) - x_mean * np.mean(y)) / (
np.mean(x ** 2) - x_mean ** 2)
def fit_trend(x, order=0):
"""Fit linear regression with polynomial terms of given order
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is fitted separately
order : int
The polynomial order of the trend, zero is constant (mean), one is
linear trend, two is quadratic trend, and so on.
Returns
-------
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
See Also
-------
add_trend
remove_trend
"""
x = check_array(x)
if order == 0:
coefs = np.mean(x, axis=1).reshape(-1, 1)
else:
n_obs = x.shape[1]
index = np.arange(n_obs)
poly_terms = np.vander(index, N=order + 1)
# linear least squares fitting using numpy's optimised routine,
# assuming samples in columns
# coefs = np.linalg.pinv(poly_terms).dot(x.T).T
coefs, _, _, _ = np.linalg.lstsq(poly_terms, x.T, rcond=None)
# returning fitted coefficients in expected format with samples in rows
coefs = coefs.T
return coefs
def remove_trend(x, coefs, time_index=None):
"""Remove trend from an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is de-trended separately
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients for each sample, single column means order zero,
two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The de-trended series is the residual of the linear regression of the
data on the trend of given order.
See Also
--------
fit_trend
add_trend
References
----------
Adapted from statsmodels (0.9.0), see
https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html
#detrend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, remove mean
if order == 0:
xt = x - coefs
return xt
else:
if time_index is None:
# if no time index is given, create range index
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x - np.dot(poly_terms, coefs.T).T
return xt
def add_trend(x, coefs, time_index=None):
"""Add trend to array for given fitted coefficients along axis 0 or 1,
inverse function to `remove_trend()`
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is treated separately
coefs : array-like, shape=[n_samples, order + 1]
fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The series with added trend.
See Also
-------
fit_trend
remove_trend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, add mean
if order == 0:
xt = x + coefs
else:
if time_index is None:
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x + np.dot(poly_terms, coefs.T).T
return xt
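# Minimal round-trip sketch: fit a linear trend per series, remove it, then add
# it back; the synthetic numbers below are illustrative only.
if __name__ == "__main__":
    x = 3.0 + 0.5 * np.arange(20, dtype=float).reshape(1, -1)
    coefs = fit_trend(x, order=1)
    detrended = remove_trend(x, coefs)
    restored = add_trend(detrended, coefs)
    np.testing.assert_allclose(restored, x)
    assert abs(time_series_slope(x.ravel()) - 0.5) < 1e-12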
| [((142, 8, 142, 22), 'sklearn.utils.check_array', 'check_array', ({(142, 20, 142, 21): 'x'}, {}), '(x)', False, 'from sklearn.utils import check_array\n'), ((194, 8, 194, 22), 'sklearn.utils.check_array', 'check_array', ({(194, 20, 194, 21): 'x'}, {}), '(x)', False, 'from sklearn.utils import check_array\n'), ((247, 8, 247, 22), 'sklearn.utils.check_array', 'check_array', ({(247, 20, 247, 21): 'x'}, {}), '(x)', False, 'from sklearn.utils import check_array\n'), ((89, 33, 89, 53), 'numpy.int', 'np.int', ({(89, 40, 89, 52): 'n_intervals_'}, {}), '(n_intervals_)', True, 'import numpy as np\n'), ((115, 12, 115, 33), 'numpy.arange', 'np.arange', ({(115, 22, 115, 32): 'len_series'}, {}), '(len_series)', True, 'import numpy as np\n'), ((149, 16, 149, 32), 'numpy.arange', 'np.arange', ({(149, 26, 149, 31): 'n_obs'}, {}), '(n_obs)', True, 'import numpy as np\n'), ((150, 21, 150, 50), 'numpy.vander', 'np.vander', (), '', True, 'import numpy as np\n'), ((155, 25, 155, 69), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (), '', True, 'import numpy as np\n'), ((216, 21, 216, 55), 'numpy.vander', 'np.vander', (), '', True, 'import numpy as np\n'), ((269, 21, 269, 55), 'numpy.vander', 'np.vander', (), '', True, 'import numpy as np\n'), ((46, 30, 46, 43), 'numpy.dtype', 'np.dtype', ({(46, 39, 46, 42): 'int'}, {}), '(int)', True, 'import numpy as np\n'), ((109, 8, 109, 21), 'numpy.asarray', 'np.asarray', ({(109, 19, 109, 20): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((208, 25, 208, 41), 'numpy.arange', 'np.arange', ({(208, 35, 208, 40): 'n_obs'}, {}), '(n_obs)', True, 'import numpy as np\n'), ((211, 25, 211, 53), 'sktime.utils.validation.forecasting.check_time_index', 'check_time_index', ({(211, 42, 211, 52): 'time_index'}, {}), '(time_index)', False, 'from sktime.utils.validation.forecasting import check_time_index\n'), ((259, 25, 259, 41), 'numpy.arange', 'np.arange', ({(259, 35, 259, 40): 'n_obs'}, {}), '(n_obs)', True, 'import numpy as np\n'), ((263, 25, 263, 53), 'sktime.utils.validation.forecasting.check_time_index', 'check_time_index', ({(263, 42, 263, 52): 'time_index'}, {}), '(time_index)', False, 'from sktime.utils.validation.forecasting import check_time_index\n'), ((34, 45, 34, 58), 'numpy.dtype', 'np.dtype', ({(34, 54, 34, 57): 'int'}, {}), '(int)', True, 'import numpy as np\n'), ((117, 16, 117, 30), 'numpy.mean', 'np.mean', ({(117, 24, 117, 29): '(x * y)'}, {}), '(x * y)', True, 'import numpy as np\n'), ((118, 20, 118, 35), 'numpy.mean', 'np.mean', ({(118, 28, 118, 34): '(x ** 2)'}, {}), '(x ** 2)', True, 'import numpy as np\n'), ((145, 16, 145, 34), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((217, 17, 217, 44), 'numpy.dot', 'np.dot', ({(217, 24, 217, 34): 'poly_terms', (217, 36, 217, 43): 'coefs.T'}, {}), '(poly_terms, coefs.T)', True, 'import numpy as np\n'), ((270, 17, 270, 44), 'numpy.dot', 'np.dot', ({(270, 24, 270, 34): 'poly_terms', (270, 36, 270, 43): 'coefs.T'}, {}), '(poly_terms, coefs.T)', True, 'import numpy as np\n'), ((117, 42, 117, 52), 'numpy.mean', 'np.mean', ({(117, 50, 117, 51): 'y'}, {}), '(y)', True, 'import numpy as np\n')] |
Hanjun-Dai/sdvae | prog_vae/prog_encoder/prog_encoder.py | bd26ea949c496419634fd2cf4802fc8e19a9194c | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import math
import random
from collections import defaultdict
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append( '%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)) )
from prog_util import DECISION_DIM
from cmd_args import cmd_args
from pytorch_initializer import weights_init
sys.path.append( '%s/../cfg_parser' % os.path.dirname(os.path.realpath(__file__)) )
import cfg_parser as parser
class CNNEncoder(nn.Module):
def __init__(self, max_len, latent_dim):
super(CNNEncoder, self).__init__()
self.latent_dim = latent_dim
self.max_len = max_len
self.conv1 = nn.Conv1d(DECISION_DIM, cmd_args.c1, cmd_args.c1)
self.conv2 = nn.Conv1d(cmd_args.c1, cmd_args.c2, cmd_args.c2)
self.conv3 = nn.Conv1d(cmd_args.c2, cmd_args.c3, cmd_args.c3)
self.last_conv_size = max_len - cmd_args.c1 + 1 - cmd_args.c2 + 1 - cmd_args.c3 + 1
self.w1 = nn.Linear(self.last_conv_size * cmd_args.c3, cmd_args.dense)
self.mean_w = nn.Linear(cmd_args.dense, latent_dim)
self.log_var_w = nn.Linear(cmd_args.dense, latent_dim)
weights_init(self)
def forward(self, x_cpu):
if cmd_args.mode == 'cpu':
batch_input = Variable(torch.from_numpy(x_cpu))
else:
batch_input = Variable(torch.from_numpy(x_cpu).cuda())
h1 = self.conv1(batch_input)
h1 = F.relu(h1)
h2 = self.conv2(h1)
h2 = F.relu(h2)
h3 = self.conv3(h2)
h3 = F.relu(h3)
# h3 = torch.transpose(h3, 1, 2).contiguous()
flatten = h3.view(x_cpu.shape[0], -1)
h = self.w1(flatten)
h = F.relu(h)
z_mean = self.mean_w(h)
z_log_var = self.log_var_w(h)
return (z_mean, z_log_var)
if __name__ == '__main__':
pass
| [((33, 21, 33, 70), 'torch.nn.Conv1d', 'nn.Conv1d', ({(33, 31, 33, 43): 'DECISION_DIM', (33, 45, 33, 56): 'cmd_args.c1', (33, 58, 33, 69): 'cmd_args.c1'}, {}), '(DECISION_DIM, cmd_args.c1, cmd_args.c1)', True, 'import torch.nn as nn\n'), ((34, 21, 34, 69), 'torch.nn.Conv1d', 'nn.Conv1d', ({(34, 31, 34, 42): 'cmd_args.c1', (34, 44, 34, 55): 'cmd_args.c2', (34, 57, 34, 68): 'cmd_args.c2'}, {}), '(cmd_args.c1, cmd_args.c2, cmd_args.c2)', True, 'import torch.nn as nn\n'), ((35, 21, 35, 69), 'torch.nn.Conv1d', 'nn.Conv1d', ({(35, 31, 35, 42): 'cmd_args.c2', (35, 44, 35, 55): 'cmd_args.c3', (35, 57, 35, 68): 'cmd_args.c3'}, {}), '(cmd_args.c2, cmd_args.c3, cmd_args.c3)', True, 'import torch.nn as nn\n'), ((38, 18, 38, 78), 'torch.nn.Linear', 'nn.Linear', ({(38, 28, 38, 61): 'self.last_conv_size * cmd_args.c3', (38, 63, 38, 77): 'cmd_args.dense'}, {}), '(self.last_conv_size * cmd_args.c3, cmd_args.dense)', True, 'import torch.nn as nn\n'), ((39, 22, 39, 59), 'torch.nn.Linear', 'nn.Linear', ({(39, 32, 39, 46): 'cmd_args.dense', (39, 48, 39, 58): 'latent_dim'}, {}), '(cmd_args.dense, latent_dim)', True, 'import torch.nn as nn\n'), ((40, 25, 40, 62), 'torch.nn.Linear', 'nn.Linear', ({(40, 35, 40, 49): 'cmd_args.dense', (40, 51, 40, 61): 'latent_dim'}, {}), '(cmd_args.dense, latent_dim)', True, 'import torch.nn as nn\n'), ((41, 8, 41, 26), 'pytorch_initializer.weights_init', 'weights_init', ({(41, 21, 41, 25): 'self'}, {}), '(self)', False, 'from pytorch_initializer import weights_init\n'), ((50, 13, 50, 23), 'torch.nn.functional.relu', 'F.relu', ({(50, 20, 50, 22): 'h1'}, {}), '(h1)', True, 'import torch.nn.functional as F\n'), ((52, 13, 52, 23), 'torch.nn.functional.relu', 'F.relu', ({(52, 20, 52, 22): 'h2'}, {}), '(h2)', True, 'import torch.nn.functional as F\n'), ((54, 13, 54, 23), 'torch.nn.functional.relu', 'F.relu', ({(54, 20, 54, 22): 'h3'}, {}), '(h3)', True, 'import torch.nn.functional as F\n'), ((59, 12, 59, 21), 'torch.nn.functional.relu', 'F.relu', ({(59, 19, 59, 20): 'h'}, {}), '(h)', True, 'import torch.nn.functional as F\n'), ((19, 55, 19, 81), 'os.path.realpath', 'os.path.realpath', ({(19, 72, 19, 80): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((24, 54, 24, 80), 'os.path.realpath', 'os.path.realpath', ({(24, 71, 24, 79): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((45, 35, 45, 58), 'torch.from_numpy', 'torch.from_numpy', ({(45, 52, 45, 57): 'x_cpu'}, {}), '(x_cpu)', False, 'import torch\n'), ((47, 35, 47, 58), 'torch.from_numpy', 'torch.from_numpy', ({(47, 52, 47, 57): 'x_cpu'}, {}), '(x_cpu)', False, 'import torch\n')] |
JosephRedfern/VarienseVMU | pyvmu/messages.py | e27c05a83124e024cd049b10f7d682f7f41a5c73 | from collections import namedtuple
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
| [((3, 16, 3, 73), 'collections.namedtuple', 'namedtuple', ({(3, 27, 3, 42): '"""Accelerometer"""', (3, 44, 3, 72): "['timestamp', 'x', 'y', 'z']"}, {}), "('Accelerometer', ['timestamp', 'x', 'y', 'z'])", False, 'from collections import namedtuple\n'), ((4, 15, 4, 71), 'collections.namedtuple', 'namedtuple', ({(4, 26, 4, 40): '"""Magnetometer"""', (4, 42, 4, 70): "['timestamp', 'x', 'y', 'z']"}, {}), "('Magnetometer', ['timestamp', 'x', 'y', 'z'])", False, 'from collections import namedtuple\n'), ((5, 12, 5, 65), 'collections.namedtuple', 'namedtuple', ({(5, 23, 5, 34): '"""Gyroscope"""', (5, 36, 5, 64): "['timestamp', 'x', 'y', 'z']"}, {}), "('Gyroscope', ['timestamp', 'x', 'y', 'z'])", False, 'from collections import namedtuple\n'), ((6, 8, 6, 57), 'collections.namedtuple', 'namedtuple', ({(6, 19, 6, 26): '"""Euler"""', (6, 28, 6, 56): "['timestamp', 'x', 'y', 'z']"}, {}), "('Euler', ['timestamp', 'x', 'y', 'z'])", False, 'from collections import namedtuple\n'), ((8, 13, 8, 72), 'collections.namedtuple', 'namedtuple', ({(8, 24, 8, 36): '"""Quaternion"""', (8, 38, 8, 71): "['timestamp', 'w', 'x', 'y', 'z']"}, {}), "('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])", False, 'from collections import namedtuple\n'), ((10, 10, 10, 51), 'collections.namedtuple', 'namedtuple', ({(10, 21, 10, 30): '"""Heading"""', (10, 32, 10, 50): "['timestamp', 'h']"}, {}), "('Heading', ['timestamp', 'h'])", False, 'from collections import namedtuple\n'), ((12, 9, 23, 58), 'collections.namedtuple', 'namedtuple', ({(12, 20, 12, 28): '"""Status"""', (12, 30, 23, 57): "['magnetometer_enabled', 'gyroscope_enabled', 'accelerometer_enabled',\n 'gyroscope_resolution', 'accelerometer_resolution', 'low_output_rate',\n 'heading_streaming', 'euler_streaming', 'magnetometer_streaming',\n 'quaternions_streaming', 'gyroscope_streaming', 'accelerometer_streaming']"}, {}), "('Status', ['magnetometer_enabled', 'gyroscope_enabled',\n 'accelerometer_enabled', 'gyroscope_resolution',\n 'accelerometer_resolution', 'low_output_rate', 'heading_streaming',\n 'euler_streaming', 'magnetometer_streaming', 'quaternions_streaming',\n 'gyroscope_streaming', 'accelerometer_streaming'])", False, 'from collections import namedtuple\n')] |
Pythobit/python-projects | scripts/Caesar-Cipher/CaesarCipher.py | 1a6ee3f0f417846626dfa021af49c999771a0199 | from __future__ import print_function
import os
import string
import argparse
try:
maketrans = string.maketrans # python2
except AttributeError:
maketrans = str.maketrans # python3
def caeser_cipher(string_: str, offset: int, decode: bool, file_: str) -> None:
    """Caesar Cipher implementation, reads file or string. Also decodes.
Default implementation is ROT13 encoding.
To decode, specify the same offset you used to encode and your ciphertext / file.
:param string_: string to encode / decode
:param offset: # of chars to rotate by
:param decode: decode instead of encode
:param file_: file to read in then encode/decode
"""
if file_ and os.path.exists(file_):
with open(file_, "r") as f:
string_ = f.read()
if decode:
offset *= -1
lower_offset_alphabet = (
string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset]
)
lower_translation_table = maketrans(string.ascii_lowercase, lower_offset_alphabet)
upper_offset_alphabet = (
string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset]
)
upper_translation_table = maketrans(string.ascii_uppercase, upper_offset_alphabet)
lower_converted = string_.translate(lower_translation_table)
final_converted = lower_converted.translate(upper_translation_table)
if file_:
extension = "dec" if decode else "enc"
with open("{}.{}".format(file_, extension), "w") as f:
print(final_converted, file=f)
else:
print(final_converted)
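# Example (illustrative): ROT13 round trip on a plain string.
#
#   >>> caeser_cipher("Hello, World!", 13, False, None)
#   Uryyb, Jbeyq!
#   >>> caeser_cipher("Uryyb, Jbeyq!", 13, True, None)
#   Hello, World!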
def check_offset_range(value: int) -> int:
"""Validates that value is in the allowable range.
:param value: integer to validate
:return: valid integer
:raises: argparse.ArgumentTypeError
"""
value = int(value)
if value < -25 or value > 25:
raise argparse.ArgumentTypeError("{} is an invalid offset".format(value))
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Simple Caeser Cipher Encoder and Decoder"
)
parser.add_argument(
"-d",
"--decode",
action="store_true",
dest="decode",
help="decode ciphertext (offset should equal what was used to encode)",
default=False,
)
parser.add_argument(
"-o",
"--offset",
dest="offset",
default=13,
type=check_offset_range,
help="number of characters to shift",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--file", dest="file", help="file to encode", default=None)
group.add_argument(
"-s", "--string", dest="string", help="string to encode", default=None
)
args = parser.parse_args()
caeser_cipher(args.string, args.offset, args.decode, args.file)
| [((62, 13, 64, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((21, 17, 21, 38), 'os.path.exists', 'os.path.exists', ({(21, 32, 21, 37): 'file_'}, {}), '(file_)', False, 'import os\n')] |
BuildAMovement/whistler-kobocat | onadata/libs/permissions.py | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from guardian.shortcuts import (
assign_perm,
remove_perm,
get_perms,
get_users_with_perms)
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.logger.models import XForm
from onadata.apps.api.models import Project
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
class Role(object):
class_to_permissions = None
permissions = None
name = None
@classmethod
def _remove_obj_permissions(self, user, obj):
content_type = ContentType.objects.get(
model=obj.__class__.__name__.lower(),
app_label=obj.__class__._meta.app_label
)
object_permissions = user.userobjectpermission_set.filter(
object_pk=obj.pk, content_type=content_type)
for perm in object_permissions:
remove_perm(perm.permission.codename, user, obj)
@classmethod
def add(cls, user, obj):
cls._remove_obj_permissions(user, obj)
for codename, klass in cls.permissions:
if type(obj) == klass:
assign_perm(codename, user, obj)
@classmethod
    def has_role(cls, permissions, obj):
        """Check that permissions correspond to this role for this object.
:param permissions: A list of permissions.
:param obj: An object to get the permissions of.
"""
perms_for_role = set(cls.class_to_permissions[type(obj)])
return perms_for_role.issubset(set(permissions))
@classmethod
def user_has_role(cls, user, obj):
"""Check that a user has this role.
:param user: A user object.
:param obj: An object to get the permissions of.
"""
return user.has_perms(cls.class_to_permissions[type(obj)], obj)
class ReadOnlyRole(Role):
name = 'readonly'
permissions = (
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_XFORM, XForm),
(CAN_VIEW_PROJECT, Project),
)
class DataEntryRole(Role):
name = 'dataentry'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class EditorRole(Role):
name = 'editor'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class ManagerRole(Role):
name = 'manager'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class MemberRole(Role):
"""This is a role for a member of an organization.
"""
name = 'member'
class OwnerRole(Role):
"""This is a role for an owner of a dataset, organization, or project.
"""
name = 'owner'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_ADD_XFORM, XForm),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_CHANGE_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_ADD_USERPROFILE, UserProfile),
(CAN_CHANGE_USERPROFILE, UserProfile),
(CAN_DELETE_USERPROFILE, UserProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_XFORM, OrganizationProfile),
(CAN_CHANGE_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_DELETE_ORGANIZATION_PROFILE, OrganizationProfile),
(IS_ORGANIZATION_OWNER, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_TRANSFER_PROJECT_OWNERSHIP, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
)
ROLES_ORDERED = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
ROLES = {role.name: role for role in ROLES_ORDERED}
# Memoize a class to permissions dict.
for role in ROLES.values():
role.class_to_permissions = defaultdict(list)
[role.class_to_permissions[k].append(p) for p, k in role.permissions]
def is_organization(obj):
try:
obj.organizationprofile
return True
except OrganizationProfile.DoesNotExist:
return False
def get_role(permissions, obj):
for role in reversed(ROLES_ORDERED):
if role.has_role(permissions, obj):
return role.name
def get_role_in_org(user, organization):
perms = get_perms(user, organization)
if 'is_org_owner' in perms:
return OwnerRole.name
else:
return get_role(perms, organization) or MemberRole.name
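# Illustrative flow (requires a configured Django/guardian environment; the
# `user`, `xform` and `organization` objects below are hypothetical):
#
#   EditorRole.add(user, xform)                  # grant editor perms on an XForm
#   get_role(get_perms(user, xform), xform)      # -> 'editor'
#   get_role_in_org(user, organization)          # -> 'owner', another role name, or 'member'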
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'role': get_role(permissions, obj),
'permissions': permissions} for user, permissions in
users_with_perms if not is_organization(
UserProfile.objects.get_or_create(user=user)[0]
)
]
return result
| [((210, 32, 210, 49), 'collections.defaultdict', 'defaultdict', ({(210, 44, 210, 48): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((229, 12, 229, 41), 'guardian.shortcuts.get_perms', 'get_perms', ({(229, 22, 229, 26): 'user', (229, 28, 229, 40): 'organization'}, {}), '(user, organization)', False, 'from guardian.shortcuts import assign_perm, remove_perm, get_perms, get_users_with_perms\n'), ((65, 12, 65, 60), 'guardian.shortcuts.remove_perm', 'remove_perm', ({(65, 24, 65, 48): 'perm.permission.codename', (65, 50, 65, 54): 'user', (65, 56, 65, 59): 'obj'}, {}), '(perm.permission.codename, user, obj)', False, 'from guardian.shortcuts import assign_perm, remove_perm, get_perms, get_users_with_perms\n'), ((73, 16, 73, 48), 'guardian.shortcuts.assign_perm', 'assign_perm', ({(73, 28, 73, 36): 'codename', (73, 38, 73, 42): 'user', (73, 44, 73, 47): 'obj'}, {}), '(codename, user, obj)', False, 'from guardian.shortcuts import assign_perm, remove_perm, get_perms, get_users_with_perms\n'), ((245, 27, 246, 59), 'guardian.shortcuts.get_users_with_perms', 'get_users_with_perms', (), '', False, 'from guardian.shortcuts import assign_perm, remove_perm, get_perms, get_users_with_perms\n'), ((253, 16, 253, 60), 'onadata.apps.main.models.user_profile.UserProfile.objects.get_or_create', 'UserProfile.objects.get_or_create', (), '', False, 'from onadata.apps.main.models.user_profile import UserProfile\n')] |
gauborg/lane-finding-gborgaonkar | lanelines.py | 466313a0da7c245e25f0987afa953300501d5322 | # Self-Driving Car Engineer Nanodegree
#
# ## Project: **Finding Lane Lines on the Road**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import moviepy
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
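# A quick illustration of the color-selection idea above (not used by the
# pipeline below; the RGB thresholds are illustrative):
#
#   white_mask = cv2.inRange(image, np.array([200, 200, 200]), np.array([255, 255, 255]))
#   white_only = cv2.bitwise_and(image, image, mask=white_mask)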
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# lists to store the slopes of lines which match our criteria
left_slope = []
right_slope = []
# lists to store the calculate b intercepts of these lines
left_b = []
right_b = []
for line in lines:
for x1,y1,x2,y2 in line:
slope = ((y2-y1)/(x2-x1))
# only select lines with specific slope range
if(((slope < 0.8) and (slope > 0.5)) or ((slope > -0.8) and (slope < -0.5))):
# check where the endpoints lie on the image...
if (x1 < (img.shape[1]/2) and x2 < (img.shape[1]/2)):
left_slope.append(slope)
left_b.append(y1-slope*x1)
left_b.append(y2-slope*x2)
else:
right_slope.append(slope)
right_b.append(y1-slope*x1)
right_b.append(y2-slope*x2)
try:
# we calculate average slope to draw the line
avg_left_slope = sum(left_slope)/len(left_slope)
avg_right_slope = sum(right_slope)/len(right_slope)
avg_left_b = sum(left_b)/len(left_b)
avg_right_b = sum(right_b)/len(right_b)
# Y co-ordinate of the lane line will definitely be at the bottom of the image
y1 = img.shape[0]
y2 = 320
y3 = 320
y4 = img.shape[0]
# X co-ordinate can be calculated by using the eqn of the line and y co-ordinate
x1 = (y1 - avg_left_b)/avg_left_slope
x2 = (y2 - avg_left_b)/avg_left_slope
x3 = (y3 - avg_right_b)/avg_right_slope
x4 = (y4 - avg_right_b)/avg_right_slope
# draw the lines, converting values to integer for pixels
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
cv2.line(img, (int(x3), int(y3)), (int(x4), int(y4)), color, thickness)
except ZeroDivisionError as error:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
import os
directory = os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def lanelines(image):
# 1. Grayscaling
gray = grayscale(image)
# 2. Gaussian Blur
blur = gaussian_blur(gray, 5)
# 3. Canny Detection
canny_edges = canny(blur, 50, 150)
# 4. Region Masking
vertices = np.array([[(0,image.shape[0]),(460,320),(500,320),(image.shape[1],image.shape[0])]], dtype=np.int32)
selected_region = region_of_interest(canny_edges, vertices)
mpimg.imsave(os.path.join("test_images_output/" + "output-" + i), selected_region)
# image.save(os.path.join("test_images_output/" + i + "-canny-region-output"), format=None, dpi=(540, 960))
# Hough Transform Parameters- Identify lane lines in the masked region
# execute Hough Transform
lines_image = hough_lines(selected_region, 2, np.pi/180, 25, 20, 10)
weighted_image = weighted_img(lines_image, image)
return weighted_image
for i in directory:
image = mpimg.imread(os.path.join("test_images/", i))
weighted_image = lanelines(image)
mpimg.imsave(os.path.join("test_images_output/" + "output+" + i), weighted_image)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
# `solidWhiteRight.mp4`
# `solidYellowLeft.mp4`
#
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = lanelines(image)
return result
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| [((16, 8, 16, 55), 'matplotlib.image.imread', 'mpimg.imread', ({(16, 21, 16, 54): '"""test_images/solidWhiteRight.jpg"""'}, {}), "('test_images/solidWhiteRight.jpg')", True, 'import matplotlib.image as mpimg\n'), ((20, 0, 20, 17), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(20, 11, 20, 16): 'image'}, {}), '(image)', True, 'import matplotlib.pyplot as plt\n'), ((188, 12, 188, 38), 'os.listdir', 'os.listdir', ({(188, 23, 188, 37): '"""test_images/"""'}, {}), "('test_images/')", False, 'import os\n'), ((259, 8, 259, 56), 'moviepy.editor.VideoFileClip', 'VideoFileClip', ({(259, 22, 259, 55): '"""test_videos/solidWhiteRight.mp4"""'}, {}), "('test_videos/solidWhiteRight.mp4')", False, 'from moviepy.editor import VideoFileClip\n'), ((265, 8, 265, 56), 'moviepy.editor.VideoFileClip', 'VideoFileClip', ({(265, 22, 265, 55): '"""test_videos/solidYellowLeft.mp4"""'}, {}), "('test_videos/solidYellowLeft.mp4')", False, 'from moviepy.editor import VideoFileClip\n'), ((272, 8, 272, 50), 'moviepy.editor.VideoFileClip', 'VideoFileClip', ({(272, 22, 272, 49): '"""test_videos/challenge.mp4"""'}, {}), "('test_videos/challenge.mp4')", False, 'from moviepy.editor import VideoFileClip\n'), ((45, 11, 45, 48), 'cv2.cvtColor', 'cv2.cvtColor', ({(45, 24, 45, 27): 'img', (45, 29, 45, 47): 'cv2.COLOR_RGB2GRAY'}, {}), '(img, cv2.COLOR_RGB2GRAY)', False, 'import cv2\n'), ((51, 11, 51, 56), 'cv2.Canny', 'cv2.Canny', ({(51, 21, 51, 24): 'img', (51, 26, 51, 39): 'low_threshold', (51, 41, 51, 55): 'high_threshold'}, {}), '(img, low_threshold, high_threshold)', False, 'import cv2\n'), ((55, 11, 55, 63), 'cv2.GaussianBlur', 'cv2.GaussianBlur', ({(55, 28, 55, 31): 'img', (55, 33, 55, 59): '(kernel_size, kernel_size)', (55, 61, 55, 62): '(0)'}, {}), '(img, (kernel_size, kernel_size), 0)', False, 'import cv2\n'), ((67, 11, 67, 29), 'numpy.zeros_like', 'np.zeros_like', ({(67, 25, 67, 28): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((77, 4, 77, 51), 'cv2.fillPoly', 'cv2.fillPoly', ({(77, 17, 77, 21): 'mask', (77, 23, 77, 31): 'vertices', (77, 33, 77, 50): 'ignore_mask_color'}, {}), '(mask, vertices, ignore_mask_color)', False, 'import cv2\n'), ((80, 19, 80, 45), 'cv2.bitwise_and', 'cv2.bitwise_and', ({(80, 35, 80, 38): 'img', (80, 40, 80, 44): 'mask'}, {}), '(img, mask)', False, 'import cv2\n'), ((161, 15, 161, 72), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((179, 11, 179, 56), 'cv2.addWeighted', 'cv2.addWeighted', ({(179, 27, 179, 38): 'initial_img', (179, 40, 179, 42): 'α', (179, 44, 179, 47): 'img', (179, 49, 179, 51): 'β', (179, 53, 179, 55): 'γ'}, {}), '(initial_img, α, img, β, γ)', False, 'import cv2\n'), ((204, 15, 204, 115), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((160, 56, 160, 68), 'numpy.array', 'np.array', ({(160, 65, 160, 67): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((207, 17, 207, 68), 'os.path.join', 'os.path.join', ({(207, 30, 207, 67): "('test_images_output/' + 'output-' + i)"}, {}), "('test_images_output/' + 'output-' + i)", False, 'import os\n'), ((219, 25, 219, 56), 'os.path.join', 'os.path.join', ({(219, 38, 219, 52): '"""test_images/"""', (219, 54, 219, 55): 'i'}, {}), "('test_images/', i)", False, 'import os\n'), ((221, 17, 221, 68), 'os.path.join', 'os.path.join', ({(221, 30, 221, 67): "('test_images_output/' + 'output+' + i)"}, {}), "('test_images_output/' + 'output+' + i)", False, 'import os\n')] |
phobson/zict | zict/zip.py | 666c7cd9fd4667cc8831a35cf958fd51788acd3e | try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import zipfile
class Zip(MutableMapping):
"""Mutable Mapping interface to a Zip file
Keys must be strings, values must be bytes
Parameters
----------
filename: string
mode: string, ('r', 'w', 'a'), defaults to 'a'
Examples
--------
>>> z = Zip('myfile.zip') # doctest: +SKIP
>>> z['x'] = b'123' # doctest: +SKIP
>>> z['x'] # doctest: +SKIP
b'123'
>>> z.flush() # flush and write metadata to disk # doctest: +SKIP
"""
def __init__(self, filename, mode="a"):
self.filename = filename
self.mode = mode
self._file = None
@property
def file(self):
if self.mode == "closed":
raise OSError("File closed")
if not self._file or not self._file.fp:
self._file = zipfile.ZipFile(self.filename, mode=self.mode)
return self._file
def __getitem__(self, key):
return self.file.read(key)
def __setitem__(self, key, value):
self.file.writestr(key, value)
def keys(self):
return (zi.filename for zi in self.file.filelist)
def values(self):
return map(self.file.read, self.keys())
def items(self):
return ((zi.filename, self.file.read(zi.filename)) for zi in self.file.filelist)
def __iter__(self):
return self.keys()
def __delitem__(self, key):
raise NotImplementedError("Not supported by stdlib zipfile")
def __len__(self):
return len(self.file.filelist)
def flush(self):
self.file.fp.flush()
self.file.close()
def close(self):
self.flush()
self.mode = "closed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
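# Minimal demo: the mapping also works as a context manager through the
# __enter__/__exit__ methods above; the file name below is illustrative.
if __name__ == "__main__":
    with Zip("example.zip") as z:
        z["x"] = b"123"
        print(sorted(z.keys()))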
| [((37, 25, 37, 71), 'zipfile.ZipFile', 'zipfile.ZipFile', (), '', False, 'import zipfile\n')] |
containers-kraken/neutron-lbaas | neutron_lbaas/drivers/driver_mixins.py | 43fbc34cc90512e33202bc4187ccf712dda6a782 | # Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.plugins.common import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractproperty
def db_delete_method(self):
pass
@abc.abstractmethod
def create(self, context, obj):
pass
@abc.abstractmethod
def update(self, context, obj_old, obj):
pass
@abc.abstractmethod
def delete(self, context, obj):
pass
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
Sets the provisioning_status of the load balancer and obj to
ACTIVE. Should be called last in the implementor's BaseManagerMixin
methods for successful runs.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
:param delete: set True if being called from a delete method. Will
most likely result in the obj being deleted from the db.
:param lb_create: set True if this is being called after a successful
load balancer create.
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver
# is responsible, then it is also responsible for cleaning it up.
# At this point, the VIP should already be cleaned up, so we are
# just doing neutron lbaas db cleanup.
if (obj == obj.root_loadbalancer and
self.driver.load_balancer.allocates_vip):
# NOTE(blogan): this is quite dumb to do but it is necessary
# so that a false negative pep8 error does not get thrown. An
# "unexpected-keyword-argument" pep8 error occurs bc
# self.db_delete_method is a @property method that returns a
# method.
kwargs = {'delete_vip_port': False}
self.db_delete_method(context, obj.id, **kwargs)
else:
self.db_delete_method(context, obj.id)
if obj == obj.root_loadbalancer and delete:
# Load balancer was deleted and no longer exists
return
lb_op_status = None
lb_p_status = constants.ACTIVE
if obj == obj.root_loadbalancer:
# only set the status to online if this an operation on the
# load balancer
lb_op_status = lb_const.ONLINE
# Update the load balancer's vip address and vip port id if the driver
# was responsible for allocating the vip.
if (self.driver.load_balancer.allocates_vip and lb_create and
isinstance(obj, data_models.LoadBalancer)):
self.driver.plugin.db.update_loadbalancer(
context, obj.id, {'vip_address': obj.vip_address,
'vip_port_id': obj.vip_port_id})
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
if obj == obj.root_loadbalancer or delete:
# Do not want to update the status of the load balancer again
# Or the obj was deleted from the db so no need to update the
# statuses
return
obj_op_status = lb_const.ONLINE
if isinstance(obj, data_models.HealthMonitor):
# Health Monitor does not have an operating status
obj_op_status = None
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ACTIVE, obj_op_status))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ACTIVE,
operating_status=obj_op_status)
def failed_completion(self, context, obj):
"""
Sets the provisioning status of the obj to ERROR. If obj is a
loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should
be called whenever something goes wrong (raised exception) in an
implementor's BaseManagerMixin methods.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
"""
LOG.debug("Starting failed_completion method after a failed driver "
"action.")
if isinstance(obj, data_models.LoadBalancer):
LOG.debug("Updating load balancer {0} to provisioning_status = "
"{1}, operating_status = {2}.".format(
obj.root_loadbalancer.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
LOG.debug("Updating load balancer {0} to "
"provisioning_status = {1}".format(obj.root_loadbalancer.id,
constants.ACTIVE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ACTIVE)
def update_vip(self, context, loadbalancer_id, vip_address,
vip_port_id=None):
lb_update = {'vip_address': vip_address}
if vip_port_id:
lb_update['vip_port_id'] = vip_port_id
self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id,
lb_update)
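# Illustrative sketch of a concrete manager built on BaseManagerMixin; the
# `backend` attribute and its methods are hypothetical stand-ins for a real
# driver's device/API client.
class ExampleLoadBalancerManager(BaseManagerMixin):
    @property
    def db_delete_method(self):
        return self.driver.plugin.db.delete_loadbalancer
    def create(self, context, obj):
        try:
            self.driver.backend.create_loadbalancer(obj)  # hypothetical call
        except Exception:
            self.failed_completion(context, obj)
            raise
        self.successful_completion(context, obj, lb_create=True)
    def update(self, context, obj_old, obj):
        pass  # omitted in this sketch
    def delete(self, context, obj):
        try:
            self.driver.backend.delete_loadbalancer(obj)  # hypothetical call
        except Exception:
            self.failed_completion(context, obj)
            raise
        self.successful_completion(context, obj, delete=True)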
@six.add_metaclass(abc.ABCMeta)
class BaseRefreshMixin(object):
@abc.abstractmethod
def refresh(self, context, obj):
pass
@six.add_metaclass(abc.ABCMeta)
class BaseStatsMixin(object):
@abc.abstractmethod
def stats(self, context, obj):
pass
| [((25, 6, 25, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(25, 24, 25, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((28, 1, 28, 31), 'six.add_metaclass', 'six.add_metaclass', ({(28, 19, 28, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((170, 1, 170, 31), 'six.add_metaclass', 'six.add_metaclass', ({(170, 19, 170, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((178, 1, 178, 31), 'six.add_metaclass', 'six.add_metaclass', ({(178, 19, 178, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n')] |
miguelsousa/hTools2 | Lib/hTools2/modules/ftp.py | eab400677c1b21bb2519a7354a142e167c2b39ba | # [h] hTools2.modules.ftp
"""Tools to connect to a FTP server, upload files etc."""
# This module uses the `ftplib` library to handle FTP connection and upload.
# http://docs.python.org/library/ftplib.html
import os
from ftplib import FTP
def connect_to_server(url, login, password, folder, verbose=False):
"""Connects to the FTP server using the given connection settings.
Use the given ``url``, ``login`` and ``password`` information to make a connection. Move to the given ``folder`` (if it exists), and return a ``FTP`` object.
To get to the lower level details about the FTP connection, use the optional parameter ``verbose=True``.
"""
# create FTP connection
ftp = FTP(url, login, password)
if verbose == True:
print "%s" % ftp.getwelcome()
# move to folder
ftp.cwd(folder)
if verbose == True:
ftp.retrlines('LIST')
print
return ftp
def upload_file(filePath, FTPconnection):
    """Upload the file at ``filePath`` to an FTP server, using the given ``FTPconnection``."""
file = open(filePath, 'rb')
fileName = os.path.split(filePath)[1]
FTPconnection.storbinary('STOR ' + fileName, file)
file.close()
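# Example workflow (illustrative values only; no real server is contacted here):
#
#   ftp = connect_to_server('ftp.example.com', 'username', 'password', 'public_html', verbose=True)
#   upload_file('/path/to/site/index.html', ftp)
#   ftp.quit()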
| [] |
MRsoymilk/toy-car | network/pytorch2onnx.py | 5bd51bf231781a17e1d7acb4654c3d4b6adbed41 | import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
| [((6, 9, 6, 36), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((11, 6, 11, 15), 'Net.Net', 'Net.Net', ({}, {}), '()', False, 'import Net\n'), ((16, 8, 16, 37), 'PIL.Image.open', 'Image.open', ({(16, 19, 16, 36): '"""./html/rwby.jpg"""'}, {}), "('./html/rwby.jpg')", False, 'from PIL import Image\n'), ((18, 8, 18, 49), 'torch.autograd.Variable', 'torch.autograd.Variable', ({(18, 32, 18, 48): 'image[None, ...]'}, {}), '(image[None, ...])', False, 'import torch\n'), ((14, 20, 14, 37), 'torch.load', 'torch.load', ({(14, 31, 14, 36): 'MODEL'}, {}), '(MODEL)', False, 'import torch\n')] |
player1537-forks/spack | var/spack/repos/builtin/packages/r-gridextra/package.py | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridextra(RPackage):
    """Miscellaneous Functions for "Grid" Graphics.

    Provides a number of user-level functions to work with "grid" graphics,
    notably to arrange multiple grid-based plots on a page, and draw tables."""

    cran = "gridExtra"

    version('2.3', sha256='81b60ce6f237ec308555471ae0119158b115463df696d2eca9b177ded8988e3b')
    version('2.2.1', sha256='44fe455a5bcdf48a4ece7a542f83e7749cf251dc1df6ae7634470240398c6818')

    depends_on('r-gtable', type=('build', 'run'))
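
# Note (added): with this recipe in place, the package would typically be built
# through the Spack CLI, e.g. `spack install r-gridextra` (assuming a standard Spack setup).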
| [] |
Magnety/tuFramework | tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | b31cb34d476ef306b52da955021f93c91c14ddf4 | import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
    tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
    def setup_DA_params(self):
        super().setup_DA_params()
        self.data_aug_params["p_rot"] = 0.7
        self.data_aug_params["p_eldef"] = 0.1
        self.data_aug_params["p_scale"] = 0.3

        self.data_aug_params["independent_scale_factor_for_each_axis"] = True
        self.data_aug_params["p_independent_scale_per_axis"] = 0.3

        self.data_aug_params["do_additive_brightness"] = True
        self.data_aug_params["additive_brightness_mu"] = 0
        self.data_aug_params["additive_brightness_sigma"] = 0.2
        self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
        self.data_aug_params["additive_brightness_p_per_channel"] = 1

        self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
        self.data_aug_params["elastic_deform_sigma"] = (9., 15.)

        self.data_aug_params['gamma_range'] = (0.5, 1.6)

    def initialize_network(self):
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.BatchNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.BatchNorm2d

        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
                                    len(self.net_num_pool_op_kernel_sizes),
                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
                                    dropout_op_kwargs,
                                    net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper

    """def run_training(self):
        from batchviewer import view_batch

        a = next(self.tr_gen)
        view_batch(a['data'])

        import IPython;IPython.embed()"""
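
# Note (added comment, not part of the original source): relative to the
# tuframeworkTrainerV2_insaneDA base class, this trainer only overrides the data
# augmentation parameters (more aggressive rotation, scaling, elastic deformation
# and additive brightness) and builds a Generic_UNet with BatchNorm, LeakyReLU and
# a softmax nonlinearity applied at inference time.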
| [((52, 11, 52, 36), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((50, 93, 50, 113), 'tuframework.network_architecture.initialization.InitWeights_He', 'InitWeights_He', ({(50, 108, 50, 112): '0.01'}, {}), '(0.01)', False, 'from tuframework.network_architecture.initialization import InitWeights_He\n')] |
romsok24/epiphany | ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py | f058984939561fc8d51288765976118ae12e6c32 | from typing import List
from src.command.command import Command
class Yum(Command):
    """
    Interface for `yum`
    """

    def __init__(self, retries: int):
        super().__init__('yum', retries)

    def update(self, enablerepo: str,
               package: str = None,
               disablerepo: str = '*',
               assume_yes: bool = True):
        """
        Interface for `yum update`

        :param enablerepo: repository to enable for this transaction
        :param package: optional package to update; updates everything when omitted
        :param disablerepo: repositories to disable for this transaction
        :param assume_yes: if set to True, -y flag will be used
        """
        update_parameters: List[str] = ['update']

        update_parameters.append('-y' if assume_yes else '')

        if package is not None:
            update_parameters.append(package)

        update_parameters.append(f'--disablerepo={disablerepo}')
        update_parameters.append(f'--enablerepo={enablerepo}')

        self.run(update_parameters)

    def install(self, package: str,
                assume_yes: bool = True):
        """
        Interface for `yum install -y`

        :param package: package to be installed
        :param assume_yes: if set to True, -y flag will be used
        """
        no_ask: str = '-y' if assume_yes else ''
        self.run(['install', no_ask, package])

    def remove(self, package: str,
               assume_yes: bool = True):
        """
        Interface for `yum remove -y`

        :param package: package to be removed
        :param assume_yes: if set to True, -y flag will be used
        """
        no_ask: str = '-y' if assume_yes else ''
        self.run(['remove', no_ask, package])

    def is_repo_enabled(self, repo: str) -> bool:
        output = self.run(['repolist',
                           'enabled']).stdout
        if repo in output:
            return True

        return False

    def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
        output = self.run(['repolist',
                           'all']).stdout

        repos: List[str] = []
        for line in output.split('\n'):
            for pattern in patterns:
                if pattern in line:
                    repos.append(pattern)

        return repos

    def accept_keys(self):
        # to accept import of repo's GPG key (for repo_gpgcheck=1)
        self.run(['-y', 'repolist'])

    def is_repo_available(self, repo: str) -> bool:
        retval = self.run(['-q',
                           '--disablerepo=*',
                           f'--enablerepo={repo}',
                           'repoinfo']).returncode

        if retval == 0:
            return True

        return False

    def makecache(self, fast: bool = True,
                  assume_yes: bool = True):
        args: List[str] = ['makecache']

        args.append('-y' if assume_yes else '')

        if fast:
            args.append('fast')

        self.run(args)

    def list_all_repo_info(self) -> List[str]:
        args: List[str] = ['repolist',
                           '-v',
                           'all']
        return self._run_and_filter(args)
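

# Example usage (illustrative sketch; package and repository names are hypothetical):
#
#     yum = Yum(retries=3)
#     yum.makecache()
#     yum.install('vim-enhanced')
#     if yum.is_repo_enabled('epel'):
#         yum.update(enablerepo='epel')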
| [] |
jtagcat/koroonakaart | build/generate_confirmed_cases_by_counties.py | 16a6eb24a19b286589b063742b03a123315feefc | from build.chart_data_functions import get_confirmed_cases_by_county
from build.chart_data_functions import get_county_by_day
from build.constants import CONFIRMED_CASES_BY_COUNTIES_PATH
from build.constants import COUNTY_MAPPING
from build.constants import COUNTY_POPULATION
from build.constants import DATE_SETTINGS
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
import pandas as pd
@analyze_time
@analyze_memory
def main():
    # Log status
    logger.info("Loading local data files")
    test_results = read_json_from_file(TEST_RESULTS_PATH)

    # Log status
    logger.info("Calculating main statistics")

    # Create date ranges for charts
    case_dates = pd.date_range(start=DATE_SETTINGS["firstCaseDate"], end=YESTERDAY_YMD)

    # Get data for each chart
    logger.info("Calculating data for charts")
    county_by_day = get_county_by_day(
        test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION
    )
    confirmed_cases_by_county = get_confirmed_cases_by_county(
        test_results, COUNTY_MAPPING
    )
    del county_by_day["mapPlayback"]
    del county_by_day["mapPlayback10k"]

    # Create dictionary for final JSON
    logger.info("Compiling final JSON")
    final_json = {
        "updatedOn": TODAY_DMYHM,
        "dataConfirmedCasesByCounties": confirmed_cases_by_county,
        "countyByDay": county_by_day,
    }

    # Dump JSON output
    save_as_json(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)

    # Log finish time
    logger.info("Finished update process")


if __name__ == "__main__":
    main()
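
# Running this module (added note; the exact invocation depends on the project
# layout, e.g. `python -m build.generate_confirmed_cases_by_counties` from the
# repository root) regenerates the confirmed-cases-by-counties JSON consumed by
# the charts.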
| [((22, 4, 22, 43), 'build.utils.logger.info', 'logger.info', ({(22, 16, 22, 42): '"""Loading local data files"""'}, {}), "('Loading local data files')", False, 'from build.utils import logger\n'), ((23, 19, 23, 57), 'build.utils.read_json_from_file', 'read_json_from_file', ({(23, 39, 23, 56): 'TEST_RESULTS_PATH'}, {}), '(TEST_RESULTS_PATH)', False, 'from build.utils import read_json_from_file\n'), ((26, 4, 26, 46), 'build.utils.logger.info', 'logger.info', ({(26, 16, 26, 45): '"""Calculating main statistics"""'}, {}), "('Calculating main statistics')", False, 'from build.utils import logger\n'), ((29, 17, 29, 87), 'pandas.date_range', 'pd.date_range', (), '', True, 'import pandas as pd\n'), ((32, 4, 32, 46), 'build.utils.logger.info', 'logger.info', ({(32, 16, 32, 45): '"""Calculating data for charts"""'}, {}), "('Calculating data for charts')", False, 'from build.utils import logger\n'), ((33, 20, 35, 5), 'build.chart_data_functions.get_county_by_day', 'get_county_by_day', ({(34, 8, 34, 20): 'test_results', (34, 22, 34, 32): 'case_dates', (34, 34, 34, 48): 'COUNTY_MAPPING', (34, 50, 34, 67): 'COUNTY_POPULATION'}, {}), '(test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION)', False, 'from build.chart_data_functions import get_county_by_day\n'), ((36, 32, 38, 5), 'build.chart_data_functions.get_confirmed_cases_by_county', 'get_confirmed_cases_by_county', ({(37, 8, 37, 20): 'test_results', (37, 22, 37, 36): 'COUNTY_MAPPING'}, {}), '(test_results, COUNTY_MAPPING)', False, 'from build.chart_data_functions import get_confirmed_cases_by_county\n'), ((44, 4, 44, 39), 'build.utils.logger.info', 'logger.info', ({(44, 16, 44, 38): '"""Compiling final JSON"""'}, {}), "('Compiling final JSON')", False, 'from build.utils import logger\n'), ((52, 4, 52, 62), 'build.utils.save_as_json', 'save_as_json', ({(52, 17, 52, 49): 'CONFIRMED_CASES_BY_COUNTIES_PATH', (52, 51, 52, 61): 'final_json'}, {}), '(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)', False, 'from build.utils import save_as_json\n'), ((55, 4, 55, 42), 'build.utils.logger.info', 'logger.info', ({(55, 16, 55, 41): '"""Finished update process"""'}, {}), "('Finished update process')", False, 'from build.utils import logger\n')] |