max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
donkeycar/tests/test_web_socket.py | wenxichen/donkeycar | 12 | 6600 |
from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI
from functools import partial
from tornado import testing
import tornado.websocket
import tornado.web
import tornado.ioloop
import json
from unittest.mock import Mock
from donkeycar.parts.actuator import PWMSteering, PWMThrottle
class WebSocketCalibrateTest(testing.AsyncHTTPTestCase):
"""
Example of WebSocket usage as a client
in AsyncHTTPTestCase-based unit tests.
"""
def get_app(self):
app = tornado.web.Application([('/', WebSocketCalibrateAPI)])
self.app = app
return app
def get_ws_url(self):
return "ws://localhost:" + str(self.get_http_port()) + "/"
@tornado.testing.gen_test
def test_calibrate_servo_esc_1(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['steering'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"STEERING_LEFT_PWM": 444}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['steering'].left_pulse == 444
assert isinstance(self.app.drive_train['steering'].right_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_servo_esc_2(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['steering'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"STEERING_RIGHT_PWM": 555}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['steering'].right_pulse == 555
assert isinstance(self.app.drive_train['steering'].left_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_servo_esc_3(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['throttle'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"THROTTLE_FORWARD_PWM": 666}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['throttle'].max_pulse == 666
assert isinstance(self.app.drive_train['throttle'].min_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_mm1(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = Mock()
self.app.drive_train_type = "MM1"
data = {"config": {"MM1_STEERING_MID": 1234}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train.STEERING_MID == 1234
| 2.65625 | 3 |
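The tests above exercise a WebSocketCalibrateAPI handler that applies calibration values sent as JSON to the app's drive train. As a reading aid, here is a hypothetical sketch of the behaviour those tests assume; it is not donkeycar's actual handler, and only the tornado WebSocketHandler API and the attribute names used in the tests are taken as given:

import json
import tornado.websocket

class CalibrateHandlerSketch(tornado.websocket.WebSocketHandler):
    # Hypothetical handler illustrating what the tests above exercise.
    def on_message(self, message):
        config = json.loads(message).get("config", {})
        app = self.application
        if app.drive_train_type == "SERVO_ESC":
            if "STEERING_LEFT_PWM" in config:
                app.drive_train["steering"].left_pulse = config["STEERING_LEFT_PWM"]
            if "STEERING_RIGHT_PWM" in config:
                app.drive_train["steering"].right_pulse = config["STEERING_RIGHT_PWM"]
            if "THROTTLE_FORWARD_PWM" in config:
                app.drive_train["throttle"].max_pulse = config["THROTTLE_FORWARD_PWM"]
        elif app.drive_train_type == "MM1":
            if "MM1_STEERING_MID" in config:
                app.drive_train.STEERING_MID = config["MM1_STEERING_MID"]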
misc/trac_plugins/IncludeMacro/includemacro/macros.py | weese/seqan | 1 | 6601 | <reponame>weese/seqan
# TracIncludeMacro macros
import re
import urllib2
from StringIO import StringIO
from trac.core import *
from trac.wiki.macros import WikiMacroBase
from trac.wiki.formatter import system_message
from trac.wiki.model import WikiPage
from trac.mimeview.api import Mimeview, get_mimetype, Context
from trac.perm import IPermissionRequestor
from genshi.core import escape
from genshi.input import HTMLParser, ParseError
from genshi.filters.html import HTMLSanitizer
__all__ = ['IncludeMacro']
class IncludeMacro(WikiMacroBase):
"""A macro to include other resources in wiki pages.
More documentation to follow.
"""
implements(IPermissionRequestor)
# Default output formats for sources that need them
default_formats = {
'wiki': 'text/x-trac-wiki',
}
# IWikiMacroProvider methods
def expand_macro(self, formatter, name, content):
req = formatter.req # Shortcut.
safe_content = False # Whether or not to disable cleaning HTML.
args = [x.strip() for x in content.split(',')]
if len(args) == 1:
args.append(None)
elif len(args) == 3:
if not args[2].startswith('fragment='):
msg = ('If three arguments are given, the last one must'
' start with fragment=, but tag content was %s')
return system_message(msg % content)
elif len(args) != 2:
return system_message('Invalid arguments "%s"'%content)
# Parse out fragment name.
fragment_name = None
if args[-1] and args[-1].startswith('fragment='):
fragment_name = args[-1][len('fragment='):]
args.pop()
if len(args) == 1:
args.append(None)
# Pull out the arguments
source, dest_format = args
try:
source_format, source_obj = source.split(':', 1)
except ValueError: # If no : is present, assume its a wiki page
source_format, source_obj = 'wiki', source
# Apply a default format if needed
if dest_format is None:
try:
dest_format = self.default_formats[source_format]
except KeyError:
pass
if source_format in ('http', 'https', 'ftp'):
# Since I can't really do recursion checking, and because this
# could be a source of abuse, allow selectively blocking it.
# RFE: Allow blacklist/whitelist patterns for URLS. <NPK>
# RFE: Track page edits and prevent unauthorized users from ever entering a URL include. <NPK>
if not req.perm.has_permission('INCLUDE_URL'):
self.log.info('IncludeMacro: Blocking attempt by %s to include URL %s on page %s', req.authname, source, req.path_info)
return ''
try:
urlf = urllib2.urlopen(source)
out = urlf.read()
except urllib2.URLError, e:
return system_message('Error while retrieving file', str(e))
except TracError, e:
return system_message('Error while previewing', str(e))
ctxt = Context.from_request(req)
elif source_format == 'wiki':
# XXX: Check for recursion in page includes. <NPK>
if not req.perm.has_permission('WIKI_VIEW'):
return ''
page = WikiPage(self.env, source_obj)
if not page.exists:
return system_message('Wiki page %s does not exist'%source_obj)
out = page.text
ctxt = Context.from_request(req, 'wiki', source_obj)
elif source_format == 'source':
if not req.perm.has_permission('FILE_VIEW'):
return ''
repo = self.env.get_repository(authname=req.authname)
node = repo.get_node(source_obj)
out = node.get_content().read()
if dest_format is None:
dest_format = node.content_type or get_mimetype(source_obj, out)
ctxt = Context.from_request(req, 'source', source_obj)
# RFE: Add ticket: and comment: sources. <NPK>
# RFE: Add attachment: source. <NPK>
else:
return system_message('Unsupported include source %s'%source)
# If there was a fragment name given then find the fragment.
fragment = []
current_fragment_name = None
if fragment_name:
for line in out.splitlines():
res = re.search(r'FRAGMENT\(([^)]*)\)', line)
if res:
current_fragment_name = res.groups()[0]
else:
if current_fragment_name == fragment_name:
fragment.append(line)
out = '\n'.join(fragment)
# If we have a preview format, use it
if dest_format:
# We can trust the output and do not need to call the HTML sanitizer
# below. The HTML sanitization leads to whitespace being stripped.
safe_content = True
out = Mimeview(self.env).render(ctxt, dest_format, out, force_source=True)
# Escape if needed
if not safe_content and not self.config.getbool('wiki', 'render_unsafe_content', False):
try:
out = HTMLParser(StringIO(out)).parse() | HTMLSanitizer()
except ParseError:
out = escape(out)
return out
# IPermissionRequestor methods
def get_permission_actions(self):
yield 'INCLUDE_URL'
| 2.40625 | 2 |
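The fragment handling in expand_macro above keeps only the lines that follow a FRAGMENT(name) marker until the next marker appears. A standalone sketch of that extraction logic, using the same regular expression outside of Trac, behaves like this:

import re

def extract_fragment(text, fragment_name):
    # Collect the lines belonging to FRAGMENT(fragment_name).
    fragment = []
    current = None
    for line in text.splitlines():
        match = re.search(r'FRAGMENT\(([^)]*)\)', line)
        if match:
            current = match.groups()[0]
        elif current == fragment_name:
            fragment.append(line)
    return '\n'.join(fragment)

sample = "// FRAGMENT(intro)\nline a\nline b\n// FRAGMENT(other)\nline c"
assert extract_fragment(sample, "intro") == "line a\nline b"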
packages/google/cloud/logging/client.py | rjcuevas/Email-Frontend-AngularJS- | 0 | 6602 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Stackdriver Logging API."""
import os
try:
from google.cloud.gapic.logging.v2.config_service_v2_api import (
ConfigServiceV2Api as GeneratedSinksAPI)
from google.cloud.gapic.logging.v2.logging_service_v2_api import (
LoggingServiceV2Api as GeneratedLoggingAPI)
from google.cloud.gapic.logging.v2.metrics_service_v2_api import (
MetricsServiceV2Api as GeneratedMetricsAPI)
from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI
from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI
from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI
except ImportError: # pragma: NO COVER
_HAVE_GAX = False
GeneratedLoggingAPI = GAXLoggingAPI = None
GeneratedMetricsAPI = GAXMetricsAPI = None
GeneratedSinksAPI = GAXSinksAPI = None
else:
_HAVE_GAX = True
from google.cloud.client import JSONClient
from google.cloud.environment_vars import DISABLE_GRPC
from google.cloud.logging.connection import Connection
from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI
from google.cloud.logging.connection import _MetricsAPI as JSONMetricsAPI
from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI
from google.cloud.logging.entries import ProtobufEntry
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.entries import TextEntry
from google.cloud.logging.logger import Logger
from google.cloud.logging.metric import Metric
from google.cloud.logging.sink import Sink
_DISABLE_GAX = os.getenv(DISABLE_GRPC, False)
_USE_GAX = _HAVE_GAX and not _DISABLE_GAX
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of.
If not passed, falls back to the default inferred
from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
_logging_api = _sinks_api = _metrics_api = None
@property
def logging_api(self):
"""Helper for logging-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs
"""
if self._logging_api is None:
if _USE_GAX:
generated = GeneratedLoggingAPI()
self._logging_api = GAXLoggingAPI(generated)
else:
self._logging_api = JSONLoggingAPI(self.connection)
return self._logging_api
@property
def sinks_api(self):
"""Helper for log sink-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks
"""
if self._sinks_api is None:
if _USE_GAX:
generated = GeneratedSinksAPI()
self._sinks_api = GAXSinksAPI(generated)
else:
self._sinks_api = JSONSinksAPI(self.connection)
return self._sinks_api
@property
def metrics_api(self):
"""Helper for log metric-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics
"""
if self._metrics_api is None:
if _USE_GAX:
generated = GeneratedMetricsAPI()
self._metrics_api = GAXMetricsAPI(generated)
else:
self._metrics_api = JSONMetricsAPI(self.connection)
return self._metrics_api
def logger(self, name):
"""Creates a logger bound to the current client.
:type name: str
:param name: the name of the logger to be constructed.
:rtype: :class:`google.cloud.logging.logger.Logger`
:returns: Logger created with the current client.
"""
return Logger(name, client=self)
def _entry_from_resource(self, resource, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: one entry resource from API response
:type loggers: dict or None
:param loggers: A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: One of:
:class:`google.cloud.logging.entries.TextEntry`,
:class:`google.cloud.logging.entries.StructEntry`,
:class:`google.cloud.logging.entries.ProtobufEntry`
:returns: the entry instance, constructed via the resource
"""
if 'textPayload' in resource:
return TextEntry.from_api_repr(resource, self, loggers)
elif 'jsonPayload' in resource:
return StructEntry.from_api_repr(resource, self, loggers)
elif 'protoPayload' in resource:
return ProtobufEntry.from_api_repr(resource, self, loggers)
raise ValueError('Cannot parse log entry resource')
def list_entries(self, projects=None, filter_=None, order_by=None,
page_size=None, page_token=None):
"""Return a page of log entries.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_: a filter expression. See:
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size: maximum number of entries to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a
"next page token" string: if not None, indicates that
more entries can be retrieved with another call (pass that
value as ``page_token``).
"""
if projects is None:
projects = [self.project]
resources, token = self.logging_api.list_entries(
projects=projects, filter_=filter_, order_by=order_by,
page_size=page_size, page_token=page_token)
loggers = {}
entries = [self._entry_from_resource(resource, loggers)
for resource in resources]
return entries, token
def sink(self, name, filter_=None, destination=None):
"""Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`google.cloud.logging.sink.Sink`
:returns: Sink created with the current client.
"""
return Sink(name, filter_, destination, client=self)
def list_sinks(self, page_size=None, page_token=None):
"""List sinks for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list
:type page_size: int
:param page_size: maximum number of sinks to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.sink.Sink`, plus a
"next page token" string: if not None, indicates that
more sinks can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.sinks_api.list_sinks(
self.project, page_size, page_token)
sinks = [Sink.from_api_repr(resource, self)
for resource in resources]
return sinks, token
def metric(self, name, filter_=None, description=''):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description)
def list_metrics(self, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list
:type page_size: int
:param page_size: maximum number of metrics to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.metric.Metric`, plus a
"next page token" string: if not None, indicates that
more metrics can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.metrics_api.list_metrics(
self.project, page_size, page_token)
metrics = [Metric.from_api_repr(resource, self)
for resource in resources]
return metrics, token
| 1.476563 | 1 |
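Based only on the method signatures and docstrings above, typical use of this legacy Stackdriver Logging client looks roughly as follows. The project ID and filter string are placeholders, the Client import assumes the package-level re-export that google-cloud-logging shipped at the time, and construction requires Application Default Credentials in the environment:

from google.cloud.logging import Client  # assumed package-level export

client = Client(project="my-project")    # placeholder project ID
logger = client.logger("my-log")

entries, token = client.list_entries(filter_="severity>=ERROR", page_size=50)
for entry in entries:
    print(entry)

sinks, _ = client.list_sinks()
metrics, _ = client.list_metrics()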
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py | wwwidonja/changed_plotly | 0 | 6603 | from __future__ import absolute_import
from unittest import TestCase
import os
import importlib
import inspect
from plotly.basedatatypes import BasePlotlyType, BaseFigure
datatypes_root = "new_plotly/graph_objs"
datatype_modules = [
dirpath.replace("/", ".")
for dirpath, _, _ in os.walk(datatypes_root)
if not dirpath.endswith("__pycache__")
]
class HierarchyTest(TestCase):
def test_construct_datatypes(self):
for datatypes_module in datatype_modules:
module = importlib.import_module(datatypes_module)
for name in getattr(module, "__all__", []):
if name.startswith("_") or name[0].islower() or name == "FigureWidget":
continue
obj = getattr(module, name)
try:
v = obj()
except Exception:
print(
"Failed to construct {obj} in module {module}".format(
obj=obj, module=datatypes_module
)
)
raise
if obj.__module__ == "new_plotly.graph_objs._deprecations":
self.assertTrue(isinstance(v, list) or isinstance(v, dict))
obj()
elif name in ("Figure", "FigureWidget"):
self.assertIsInstance(v, BaseFigure)
else:
self.assertIsInstance(v, BasePlotlyType)
| 2.296875 | 2 |
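The test above only checks that every exported graph-object class can be constructed with no arguments and is an instance of the expected base class. The same smoke check against the standard plotly package, rather than the renamed new_plotly vendored here, fits in a few lines:

import plotly.graph_objs as go
from plotly.basedatatypes import BasePlotlyType, BaseFigure

scatter = go.Scatter()   # zero-argument construction, as in the test
figure = go.Figure()

assert isinstance(scatter, BasePlotlyType)
assert isinstance(figure, BaseFigure)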
mycli/packages/special/main.py | lyrl/mycli | 10,997 | 6604 | <gh_stars>1000+
import logging
from collections import namedtuple
from . import export
log = logging.getLogger(__name__)
NO_QUERY = 0
PARSED_QUERY = 1
RAW_QUERY = 2
SpecialCommand = namedtuple('SpecialCommand',
['handler', 'command', 'shortcut', 'description', 'arg_type', 'hidden',
'case_sensitive'])
COMMANDS = {}
@export
class CommandNotFound(Exception):
pass
@export
def parse_special_command(sql):
command, _, arg = sql.partition(' ')
verbose = '+' in command
command = command.strip().replace('+', '')
return (command, verbose, arg.strip())
@export
def special_command(command, shortcut, description, arg_type=PARSED_QUERY,
hidden=False, case_sensitive=False, aliases=()):
def wrapper(wrapped):
register_special_command(wrapped, command, shortcut, description,
arg_type, hidden, case_sensitive, aliases)
return wrapped
return wrapper
@export
def register_special_command(handler, command, shortcut, description,
arg_type=PARSED_QUERY, hidden=False, case_sensitive=False, aliases=()):
cmd = command.lower() if not case_sensitive else command
COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description,
arg_type, hidden, case_sensitive)
for alias in aliases:
cmd = alias.lower() if not case_sensitive else alias
COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description,
arg_type, case_sensitive=case_sensitive,
hidden=True)
@export
def execute(cur, sql):
"""Execute a special command and return the results. If the special command
is not supported a CommandNotFound exception will be raised.
"""
command, verbose, arg = parse_special_command(sql)
if (command not in COMMANDS) and (command.lower() not in COMMANDS):
raise CommandNotFound
try:
special_cmd = COMMANDS[command]
except KeyError:
special_cmd = COMMANDS[command.lower()]
if special_cmd.case_sensitive:
raise CommandNotFound('Command not found: %s' % command)
# "help <SQL KEYWORD> is a special case. We want built-in help, not
# mycli help here.
if command == 'help' and arg:
return show_keyword_help(cur=cur, arg=arg)
if special_cmd.arg_type == NO_QUERY:
return special_cmd.handler()
elif special_cmd.arg_type == PARSED_QUERY:
return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
elif special_cmd.arg_type == RAW_QUERY:
return special_cmd.handler(cur=cur, query=sql)
@special_command('help', '\\?', 'Show this help.', arg_type=NO_QUERY, aliases=('\\?', '?'))
def show_help(): # All the parameters are ignored.
headers = ['Command', 'Shortcut', 'Description']
result = []
for _, value in sorted(COMMANDS.items()):
if not value.hidden:
result.append((value.command, value.shortcut, value.description))
return [(None, result, headers, None)]
def show_keyword_help(cur, arg):
"""
Call the built-in "show <command>", to display help for an SQL keyword.
:param cur: cursor
:param arg: string
:return: list
"""
keyword = arg.strip('"').strip("'")
query = "help '{0}'".format(keyword)
log.debug(query)
cur.execute(query)
if cur.description and cur.rowcount > 0:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, 'No help found for {0}.'.format(keyword))]
@special_command('exit', '\\q', 'Exit.', arg_type=NO_QUERY, aliases=('\\q', ))
@special_command('quit', '\\q', 'Quit.', arg_type=NO_QUERY)
def quit(*_args):
raise EOFError
@special_command('\\e', '\\e', 'Edit command with editor (uses $EDITOR).',
arg_type=NO_QUERY, case_sensitive=True)
@special_command('\\clip', '\\clip', 'Copy query to the system clipboard.',
arg_type=NO_QUERY, case_sensitive=True)
@special_command('\\G', '\\G', 'Display current query results vertically.',
arg_type=NO_QUERY, case_sensitive=True)
def stub():
raise NotImplementedError
| 2.359375 | 2 |
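Tying the pieces above together: the special_command decorator registers a handler in COMMANDS, and execute() parses the input, looks the command up, and dispatches according to its arg_type. A small usage sketch, assuming mycli is installed and using a made-up command name:

from mycli.packages.special.main import (
    NO_QUERY, execute, parse_special_command, special_command)

@special_command('hello', '\\hi', 'Print a greeting.', arg_type=NO_QUERY,
                 aliases=('\\hi',))
def say_hello(*_args):
    return [(None, None, None, 'Hello from a custom special command.')]

print(parse_special_command('hello+ world'))  # ('hello', True, 'world')
print(execute(cur=None, sql='hello'))         # dispatches to say_hello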
core/sample_fuzzer/data_generators/base.py | ShreyasTheOne/Super-Duper-Fuzzer | 0 | 6605 | <filename>core/sample_fuzzer/data_generators/base.py
from abc import ABC, abstractmethod
class BaseDataGenerator(ABC):
def __init__(self):
pass
@staticmethod
@abstractmethod
def generate(cls):
pass
| 2.359375 | 2 |
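A minimal concrete subclass of the abstract base above, purely as an illustration and not part of the fuzzer project: the base declares generate() as both @staticmethod and @abstractmethod with a cls parameter, which is unusual, so overriding it as a classmethod is one workable choice. The import path is taken from the file path shown above:

import random

from core.sample_fuzzer.data_generators.base import BaseDataGenerator

class RandomBytesGenerator(BaseDataGenerator):
    # Hypothetical generator producing a short random byte string.
    @classmethod
    def generate(cls):
        return bytes(random.randrange(256) for _ in range(16))

payload = RandomBytesGenerator.generate()
assert isinstance(payload, bytes) and len(payload) == 16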
Mon_08_06/convert2.py | TungTNg/itc110_python | 0 | 6606 | # convert2.py
# A program to convert Celsius temps to Fahrenheit.
# This version issues heat and cold warnings.
def main():
celsius = float(input("What is the Celsius temperature? "))
fahrenheit = 9 / 5 * celsius + 32
print("The temperature is", fahrenheit, "degrees fahrenheit.")
if fahrenheit >= 90:
print("It's really hot out there, be careful!")
if fahrenheit <= 30:
print("Brrrrr. Be sure to dress warmly")
main() | 4.1875 | 4 |
homeassistant/components/wolflink/__init__.py | basicpail/core | 11 | 6607 | """The Wolf SmartSet Service integration."""
from datetime import timedelta
import logging
from httpx import ConnectError, ConnectTimeout
from wolf_smartset.token_auth import InvalidAuth
from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_GATEWAY,
DEVICE_ID,
DEVICE_NAME,
DOMAIN,
PARAMETERS,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Wolf SmartSet Service from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
device_name = entry.data[DEVICE_NAME]
device_id = entry.data[DEVICE_ID]
gateway_id = entry.data[DEVICE_GATEWAY]
refetch_parameters = False
_LOGGER.debug(
"Setting up wolflink integration for device: %s (ID: %s, gateway: %s)",
device_name,
device_id,
gateway_id,
)
wolf_client = WolfClient(username, password)
parameters = await fetch_parameters_init(wolf_client, gateway_id, device_id)
async def async_update_data():
"""Update all stored entities for Wolf SmartSet."""
try:
nonlocal refetch_parameters
nonlocal parameters
await wolf_client.update_session()
if not wolf_client.fetch_system_state_list(device_id, gateway_id):
refetch_parameters = True
raise UpdateFailed(
"Could not fetch values from server because device is Offline."
)
if refetch_parameters:
parameters = await fetch_parameters(wolf_client, gateway_id, device_id)
hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters
refetch_parameters = False
values = {
v.value_id: v.value
for v in await wolf_client.fetch_value(
gateway_id, device_id, parameters
)
}
return {
parameter.parameter_id: (
parameter.value_id,
values[parameter.value_id],
)
for parameter in parameters
if parameter.value_id in values
}
except ConnectError as exception:
raise UpdateFailed(
f"Error communicating with API: {exception}"
) from exception
except FetchFailed as exception:
raise UpdateFailed(
f"Could not fetch values from server due to: {exception}"
) from exception
except ParameterReadError as exception:
refetch_parameters = True
raise UpdateFailed(
"Could not fetch values for parameter. Refreshing value IDs."
) from exception
except InvalidAuth as exception:
raise UpdateFailed("Invalid authentication during update.") from exception
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=timedelta(minutes=1),
)
await coordinator.async_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {}
hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters
hass.data[DOMAIN][entry.entry_id][COORDINATOR] = coordinator
hass.data[DOMAIN][entry.entry_id][DEVICE_ID] = device_id
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def fetch_parameters(client: WolfClient, gateway_id: int, device_id: int):
"""
Fetch all available parameters with usage of WolfClient.
By default Reglertyp entity is removed because API will not provide value for this parameter.
"""
fetched_parameters = await client.fetch_parameters(gateway_id, device_id)
return [param for param in fetched_parameters if param.name != "Reglertyp"]
async def fetch_parameters_init(client: WolfClient, gateway_id: int, device_id: int):
"""Fetch all available parameters with usage of WolfClient but handles all exceptions and results in ConfigEntryNotReady."""
try:
return await fetch_parameters(client, gateway_id, device_id)
except (ConnectError, ConnectTimeout, FetchFailed) as exception:
raise ConfigEntryNotReady(
f"Error communicating with API: {exception}"
) from exception
| 1.960938 | 2 |
src/levenshtein_distance.py | chunribu/python-algorithms | 0 | 6608 | <gh_stars>0
class LevenshteinDistance:
def solve(self, str_a, str_b):
a, b = str_a, str_b
self.a, self.b = a, b
dist = {(x,y):0 for x in range(len(a)) for y in range(len(b))}
for x in range(len(a)): dist[(x,-1)] = x+1
for y in range(len(b)): dist[(-1,y)] = y+1
dist[(-1,-1)] = 0
for i in range(len(a)):
for j in range(len(b)):
need_edit = a[i] != b[j]
dist[(i,j)] = min(dist[(i,j-1)] + 1, dist[(i-1,j)] + 1, dist[(i-1,j-1)] + int(need_edit))
self.distance = dist
return dist[(i,j)]
def show(self):
if hasattr(self, 'distance'):
dist = self.distance
for x in range(-1, len(self.a)):
row = []
for y in range(-1, len(self.b)):
row.append(dist[(x,y)])
print(row)
# test
ld = LevenshteinDistance()
ld.solve('kitten','sitting')
ld.show() | 3.0625 | 3 |
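The solve() method fills the classic dynamic-programming table, adding 1 on the insert and delete branches and the substitution cost on the diagonal branch. A short self-check of the expected edit distances, reusing the class defined above:

ld2 = LevenshteinDistance()
assert ld2.solve('kitten', 'sitting') == 3  # two substitutions plus one insertion
assert ld2.solve('flaw', 'lawn') == 2       # drop the leading f, append n
print('edit distances OK')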
pyapprox/benchmarks/test_spectral_diffusion.py | ConnectedSystems/pyapprox | 26 | 6609 | <gh_stars>10-100
import numpy as np
import unittest
from pyapprox.benchmarks.spectral_diffusion import (
kronecker_product_2d, chebyshev_derivative_matrix,
SteadyStateDiffusionEquation2D, SteadyStateDiffusionEquation1D
)
from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D
import pyapprox as pya
class TestSpectralDiffusion2D(unittest.TestCase):
def setUp(self):
np.random.seed(1)
self.eps = 2 * np.finfo(np.float).eps
def test_derivative_matrix(self):
order = 4
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0., 0.0]
xlim = [-1, 1]
model.initialize(order, bndry_cond, xlim)
derivative_matrix = model.get_derivative_matrix()
true_matrix = \
[[5.5, -6.82842712, 2., -1.17157288, 0.5],
[1.70710678, -0.70710678, -1.41421356, 0.70710678, -0.29289322],
[-0.5, 1.41421356, -0., -1.41421356, 0.5],
[0.29289322, -0.70710678, 1.41421356, 0.70710678, -1.70710678],
[-0.5, 1.17157288, -2., 6.82842712, -5.5]]
# I return points and calculate derivatives using reverse order of
# points compared to what is used by Matlab cheb function thus the
# derivative matrix I return will be the negative of the matlab version
assert np.allclose(-derivative_matrix, true_matrix)
def test_homogeneous_possion_equation(self):
"""
solve u(x)'' = 0, u(0) = 0, u(1) = 0.5
"""
order = 4
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.5]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
mesh_pts = model.get_collocation_points()
diff_vals = 0*mesh_pts.squeeze()+1
forcing_vals = 0*mesh_pts.squeeze()
solution = model.solve(diff_vals, forcing_vals)
def exact_sol(x): return 0.5*x
assert np.linalg.norm(exact_sol(mesh_pts.squeeze())-solution) < 20*self.eps
def test_inhomogeneous_possion_equation(self):
"""
solve u(x)'' = -1, u(0) = 0, u(1) = 1
solution u(x) = -0.5*(x-3.)*x
"""
order = 4
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 1.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
mesh_pts = model.get_collocation_points()
diff_vals = 0*mesh_pts.squeeze()+1
forcing_vals = 0*mesh_pts.squeeze()-1
solution = model.solve(diff_vals, forcing_vals)
def exact_sol(x): return -0.5*(x-3.)*x
assert np.linalg.norm(
exact_sol(mesh_pts.squeeze())-solution) < 30*self.eps
def test_inhomogeneous_diffusion_equation_with_variable_coefficient(self):
"""
solve ((1+x)*u(x)')' = -1, u(0) = 0, u(1) = 0
solution u(x) = log(x+1)/log(2) - x
"""
order = 20
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
mesh_pts = model.get_collocation_points()
def diffusivity_function(x): return x + 1
diff_vals = diffusivity_function(mesh_pts.squeeze())
forcing_vals = 0*mesh_pts.squeeze()-1
solution = model.solve(diff_vals, forcing_vals)
def exact_sol(x): return np.log(x+1.) / np.log(2.) - x
assert np.linalg.norm(exact_sol(mesh_pts.squeeze())-solution) < 3e-13
def test_integrate_1d(self):
order = 4
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
mesh_pts = model.get_collocation_points()
assert np.allclose(model.integrate(mesh_pts.T**2), 1./3.)
assert np.allclose(model.integrate(mesh_pts.T**3), 1./4.)
order = 4
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [-1, 1]
model.initialize(order, bndry_cond, xlim)
mesh_pts = model.get_collocation_points()
assert np.allclose(model.integrate(mesh_pts.T**2), 2./3.)
assert np.allclose(model.integrate(mesh_pts.T**3), 0.)
def test_evaluate(self):
"""
for the PDE ((1+z*x)*u(x)')' = -1, u(0) = 0, u(1) = 0
use model.evaluate to extract QoI
"""
order = 20
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: z*x + 1.
model.forcing_function = lambda x, z: 0*x-1
qoi_coords = np.array([0.05, 0.5, 0.95])
model.qoi_functional = lambda x: model.interpolate(x, qoi_coords)[:, 0]
sample = np.ones((1, 1), float)
qoi = model(sample)
assert np.allclose(np.log(qoi_coords+1.)/np.log(2.)-qoi_coords, qoi)
sample = 0.5*np.ones((1, 1), float)
qoi = model(sample)
assert np.allclose(
-(qoi_coords*np.log(9./4.)-2.*np.log(qoi_coords+2.) +
np.log(4.))/np.log(3./2.), qoi)
def test_evaluate_gradient_1d(self):
"""
for the PDE ((1+sum(z^2)*x)*u(x)')' = -2, u(0) = 0, u(1) = 1
use model.evaluate_gradient to evaluate the gradient of the QoI
with respect to the random parameter vector z.
The QoI is the intergral of the solution over the entire domain
The adjoint rhs is then just 1.
"""
order = 20
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: (z[0]**2+z[1]**2)*x + 1.
model.forcing_function = lambda x, z: 0*x-2
sample = np.random.RandomState(2).uniform(-1, 1, (2, 1))
model.diffusivity_derivs_function = \
lambda x, z, i: np.array([2.*x*z[i]]).T
model.forcing_derivs_function = \
lambda x, z, i: np.array([0.*x]).T
model(sample)
# evaluate_gradient has to be called before any more calls to
# model.solve with different parameters, because we need to
# access self.fwd_solution, which will change with any subsuquent calls
errors = pya.check_gradients(
model, lambda x: model.evaluate_gradient(x[:, 0]), sample)
errors = errors[np.isfinite(errors)]
assert errors.max() > 0.1 and errors.min() <= 6e-7
@unittest.skip("Not fully implemented")
def test_compute_error_estimate(self):
"""
for the PDE ((1+z*x)*u(x)')' = -1, u(0) = 0, u(1) = 0
use model.compute_error_estomate to compute an error estimate of
the deterministic error in the foward solution.
The QoI is the intergral of the solution over the entire domain
The adjoint rhs is then just 1.
"""
order = 5
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: z[0]*x + 1.
model.forcing_function = lambda x, z: 0.*x-1.
sample = np.ones((1, 1), float)
qoi = model(sample)
error_estimate = model.compute_error_estimate(sample)
solution = model.run(sample[:, 0])
def exact_solution(x): return np.log(x+1.)/np.log(2.)-x
gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(50, 0, 0)
x_range = model.xlim[1]-model.xlim[0]
gl_pts = x_range*(gl_pts+1.)/2.+model.xlim[0]
gl_wts *= x_range
gl_vals = exact_solution(gl_pts)
exact_qoi = np.dot(gl_vals, gl_wts)
exact_error = abs(exact_qoi-qoi)
print('err estimate', error_estimate)
print('exact err', exact_error)
print('effectivity ratio', error_estimate / exact_error)
# should be very close to 1. As adjoint order is increased
# it will converge to 1
sample = 0.5*np.ones((1), float)
qoi = model.evaluate(sample)
exact_solution = -(model.mesh_pts*np.log(9./4.) -
2.*np.log(model.mesh_pts+2.) +
np.log(4.))/np.log(3./2.)
exact_qoi = model.qoi_functional(exact_solution)
error = abs(exact_qoi-qoi)
error_estimate = model.compute_error_estimate(sample)
print(error_estimate, error)
# print model.integrate( (exact_solution - solution )**2 )
assert np.allclose(error_estimate, error)
def test_timestepping_without_forcing(self):
r"""
solve u_t(x,t) = u_xx(x,t), u(-1,t) = 0, u(1,t) = 0,
u(x,0) = \sin(\pi*x)
Exact solution
u(x,t) = \exp(-\pi^2t)*sin(\pi*x)
"""
order = 16
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [-1, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: 0*x + 1.
model.forcing_function = lambda x, t, z: 0*x
sample = np.ones((1), float) # dummy argument for this example
model.num_time_steps = 1000
model.initial_sol = np.sin(np.pi*model.mesh_pts)
model.time_step_size = 1e-4
model.time_step_method = 'adams-moulton-3'
# model.time_step_method = 'crank-nicholson'
model.time_step_method = 'backward-euler'
model.num_stored_timesteps = 100
solution = model.transient_solve(sample)
def exact_sol(x, t): return np.exp(-np.pi**2*t)*np.sin(np.pi*x)
test_mesh_pts = np.linspace(xlim[0], xlim[1], 100)
plot = False # True
for i, t in enumerate(model.times):
if plot:
exact_sol_t = exact_sol(test_mesh_pts, t)
model_sol_t = model.interpolate(solution[:, i], test_mesh_pts)
pya.plt.plot(test_mesh_pts, model_sol_t, 'k',
label='collocation', linewidth=2)
pya.plt.plot(test_mesh_pts, exact_sol_t,
'r--', label='exact', linewidth=2)
pya.plt.legend(loc=0)
pya.plt.title('$t=%1.2f$' % t)
pya.plt.show()
L2_error = np.sqrt(model.integrate(
(exact_sol(model.mesh_pts, t)-solution[:, i])**2))
factor = np.sqrt(
model.integrate(exact_sol(model.mesh_pts, t)**2))
# print L2_error, 1e-3*factor
assert L2_error < 1e-3*factor
def test_timestepping_with_time_independent_forcing(self):
r"""
solve u_t(x,t) = u_xx(x,t)+sin(3\pi x), u(0,t) = 0, u(1,t) = 0,
u(x,0) = 5\sin(2\pi x)+2\sin(3\pi x)
Exact solution
u(x,t) = 5\exp(-4\pi^2t)*sin(2\pi*x)+(2\exp(-9\pi^2t)+(1-\exp(-9\pi^2t))/(9\pi^2))*\sin(3\pi x)
"""
order = 32
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: 0*x + 1.
model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x)
sample = np.ones((1), float) # dummy argument for this example
model.num_time_steps = 10000
model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \
2*np.sin(3*np.pi*model.mesh_pts)
model.time_step_size = 1e-4
# model.time_step_method = 'adams-moulton-3'
model.time_step_method = 'crank-nicholson'
# model.time_step_method = 'backward-euler'
model.num_stored_timesteps = 100
solution = model.transient_solve(sample)
def exact_sol(x, t): return 5.*np.exp(-4.*np.pi**2*t)*np.sin(2.*np.pi*x) + \
(2.*np.exp(-9.*np.pi**2*t)+(1.-np.exp(-9.*np.pi**2*t))/(9.*np.pi**2))*np.sin(3.*np.pi*x)
# test_mesh_pts = np.linspace(xlim[0], xlim[1], 100)
for i, t in enumerate(model.times):
# exact_sol_t = exact_sol(test_mesh_pts,t)
# model_sol_t = model.interpolate(solution[:,i],test_mesh_pts)
# pya.plt.plot(test_mesh_pts,model_sol_t,'k',label='collocation',linewidth=2)
# pya.plt.plot(test_mesh_pts,exact_sol_t,'r--',label='exact',linewidth=2)
# pya.plt.legend(loc=0)
# pya.plt.title('$t=%1.2f$'%t)
# pya.plt.show()
L2_error = np.sqrt(model.integrate(
(exact_sol(model.mesh_pts, t)-solution[:, i])**2))
factor = np.sqrt(
model.integrate(exact_sol(model.mesh_pts, t)**2))
# print(L2_error, 1e-4*factor)
assert L2_error < 1e-4*factor
def test_timestepping_with_time_dependent_forcing(self):
r"""
solve u_t(x,t) = u_xx(x,t)+np.sin(3\pi x)*np.sin(t), u(0,t) = 0, u(1,t) = 0,
u(x,0) = 5sin(2\pi x)+2sin(3\pi x)
Exact solution
u(x,t) = 5\exp(-4\pi^2t)*np.sin(2\pi*x)+(2\exp(-9\pi^2t)+\exp(-9\pi^2t)(9\pi^2sin(t)-cos(t)+\exp(-9\pi^2t))/(1+81\pi^4))*sin(3\pi x)
"""
order = 32
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: 0*x + 1.
model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x)*np.sin(t)
sample = np.ones((1), float) # dummy argument for this example
model.num_time_steps = int(1e4)
model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \
2*np.sin(3*np.pi*model.mesh_pts)
model.time_step_size = 1e-4
model.num_stored_timesteps = 100
# model.time_step_method = 'adams-moulton-3'
model.time_step_method = 'crank-nicholson'
# model.time_step_method = 'backward-euler'
# model.time_step_method = 'RK4'
solution = model.transient_solve(sample)
def exact_sol(x, t): return 5.*np.exp(
-4.*np.pi**2*t)*np.sin(2.*np.pi*x)+(
2.*np.exp(-9.*np.pi**2*t)+(
9.*np.pi**2*np.sin(t)-np.cos(t) +
np.exp(-9.*np.pi**2*t))/(1+81.*np.pi**4))*np.sin(
3.*np.pi*x)
test_mesh_pts = np.linspace(xlim[0], xlim[1], 100)
plot = False
for i, t in enumerate(model.times):
if plot:
exact_sol_t = exact_sol(test_mesh_pts, t)
model_sol_t = model.interpolate(solution[:, i], test_mesh_pts)
pya.plt.plot(test_mesh_pts, model_sol_t, 'k',
label='collocation', linewidth=2)
pya.plt.plot(test_mesh_pts, exact_sol_t, 'r--', label='exact',
linewidth=2)
pya.plt.legend(loc=0)
pya.plt.title('$t=%1.3f$' % t)
pya.plt.show()
L2_error = np.sqrt(model.integrate(
(exact_sol(model.mesh_pts, t)-solution[:, i])**2))
factor = np.sqrt(
model.integrate(exact_sol(model.mesh_pts, t)**2))
# print(L2_error, 1e-4*factor)
assert L2_error < 1e-4*factor
# print('time %1.2e: L2 error %1.2e' % (t, L2_error))
def test_convergence(self):
order = 8 # 1e-5
# order = 16 #1e-11
order = 20 # 2e-15
model = SteadyStateDiffusionEquation1D()
bndry_cond = [0.0, 0.0]
xlim = [0, 1]
model.initialize(order, bndry_cond, xlim)
model.diffusivity_function = lambda x, z: 0*x + 1.
model.forcing_function = lambda x, t, z: np.sin(3*np.pi*x)*np.sin(t)
sample = np.ones((1), float) # dummy argument for this example
model.initial_sol = 5*np.sin(2*np.pi*model.mesh_pts) + \
2*np.sin(3*np.pi*model.mesh_pts)
final_time = 1.
model.time_step_size = 1e-2
model.num_stored_timesteps = 1
# model.time_step_method = 'crank-nicholson'
# model.time_step_method = 'backward-euler'
# model.time_step_method = 'RK4' needs bug fixes and testing
def exact_sol(x, t): return 5.*np.exp(
-4.*np.pi**2*t)*np.sin(2.*np.pi*x)+(2.*np.exp(-9.*np.pi**2*t) + (
9.*np.pi**2*np.sin(t)-np.cos(t)+np.exp(-9.*np.pi**2*t))/(1+81.*np.pi**4))*np.sin(3.*np.pi*x)
# test_mesh_pts = np.linspace(xlim[0], xlim[1], 1000)
num_convergence_steps = 4
errors = np.empty((num_convergence_steps), float)
time_step_sizes = np.empty((num_convergence_steps), float)
num_time_steps = np.empty((num_convergence_steps), float)
for i in range(num_convergence_steps):
model.num_time_steps = int(
np.ceil(final_time/model.time_step_size))
solution = model.transient_solve(sample)
assert np.allclose(model.times[0], final_time, atol=1e-15)
L2_error = np.sqrt(model.integrate(
(exact_sol(model.mesh_pts, final_time)-solution[:, 0])**2))
# interpolated_sol = model.interpolate(exact_sol(model.mesh_pts,final_time),test_mesh_pts)
# print(np.linalg.norm(exact_sol(test_mesh_pts,final_time)-interpolated_sol)/np.sqrt(interpolated_sol.shape[0]))
# print(model.num_time_steps, L2_error)
errors[i] = L2_error
time_step_sizes[i] = model.time_step_size
num_time_steps[i] = model.num_time_steps
model.time_step_size /= 2
# print(errors)
conv_rate = -np.log10(errors[-1]/errors[0])/np.log10(
num_time_steps[-1]/num_time_steps[0])
assert np.allclose(conv_rate, 2, atol=1e-4)
# pya.plt.loglog(
# num_time_steps, errors, 'o-r',
# label=r'$\lVert u(x,T)-\hat{u}(x,T)\\rVert_{\ell_2(D)}$',
# linewidth=2)
# # print errors[0]*num_time_steps[0]/num_time_steps
# order = 1
# pya.plt.loglog(
# num_time_steps,
# errors[0]*num_time_steps[0]**order/num_time_steps**order,
# 'o--', label=r'$(\Delta t)^{-%d}$' % order, linewidth=2)
# order = 2
# pya.plt.loglog(
# num_time_steps,
# errors[0]*num_time_steps[0]**order/num_time_steps**order,
# 'o--', label=r'$(\Delta t)^{-%d}$' % order, linewidth=2)
# pya.plt.legend(loc=0)
# pya.plt.show()
def test_inhomogeneous_diffusion_equation_2d_variable_coefficient(self):
"""
wolfram alpha z random variable x and w are spatial dimension
d/dx 16*exp(-z^2)*(x^2-1/4)*(w^2-1/4)
d/dx (1+t/pi^2*z*cos(pi/2*(x^2+w^2)))*32*(w^2-1/4)*x*exp(-z^2)
Peter zaspels thesis is wrong it is 1 = sigma * not 1 + sigma +
"""
sigma = 1
num_dims = 1
order = 16
model = SteadyStateDiffusionEquation2D()
lims = [-0.5, 0.5, -0.5, 0.5]
bndry_cond = [0., 0.]
model.initialize(order, bndry_cond, lims)
def forcing_function(x, y): return \
32.*(1.+sigma*y[0]*sigma*np.cos(np.pi/2.*(x[0, :]**2+x[1, :]**2))/np.pi**2) * \
np.exp(-y[0]**2)*(x[0, :]**2+x[1, :]**2-0.5) -\
32./np.pi*y[0]*sigma*np.sin(np.pi/2.*(x[0, :]**2+x[1, :]**2)) *\
(x[0, :]**2 * np.exp(-y[0]**2)*(x[1, :]**2-0.25)+x[1, :]**2 *
np.exp(-y[0]**2)*(x[0, :]**2-0.25))
def diffusivity_function(x, y):
return 1.+sigma/np.pi**2*y[0]*np.cos(
np.pi/2.*(x[0, :]**2+x[1, :]**2))
# only well posed if |y| < pi^2/sigma
def exact_sol(x, y): return 16.*np.exp(-y**2) * \
(x[0, :]**2-0.25)*(x[1, :]**2-0.25)
rng = np.random.RandomState(1)
sample = rng.uniform(-np.sqrt(3), np.sqrt(3), (num_dims))
mesh_pts = model.get_collocation_points()
diff_vals = diffusivity_function(mesh_pts, sample)
forcing_vals = forcing_function(mesh_pts, sample)
solution = model.solve(diff_vals, forcing_vals)
# print np.linalg.norm(exact_sol( mesh_pts, sample )- solution )
assert np.linalg.norm(exact_sol(mesh_pts, sample) - solution) < 2.e-12
def test_2d_matlab_example(self):
"""
Example from Spectral Methods in MATLAB, specifically program 16 on
page 70 (page 90 of the PDF).
Solve Poisson eq. on [-1,1]x[-1,1] with u=0 on boundary
and forcing 10*np.sin(8*xx.*(yy-1))
true_solution at (xx,yy)=(1/np.sqrt(2),1/np.sqrt(2))= 0.32071594511
"""
num_dims = 10
order = 24
model = SteadyStateDiffusionEquation2D()
lims = [-1, 1, -1, 1]
bndry_cond = [0., 0.]
model.initialize(order, bndry_cond, lims)
def diffusivity(x, y): return np.ones(x.shape[1])
def forcing(x, y): return 10.*np.sin(8.*(x[0, :])*(x[1, :]-1))
rng = np.random.RandomState(1)
sample = rng.uniform(-1, 1., (num_dims))
mesh_pts = model.get_collocation_points()
diff_vals = diffusivity(mesh_pts, sample)
forcing_vals = forcing(mesh_pts, sample)
solution = model.solve(diff_vals, forcing_vals)
# because I used reverse order of chebyshev points
# and thus negative sign
# of derivative matrix the solution returned here will have different
# order to matlab which can be obtained by applying flipud(fliplr(x)),
# e.g. we can obtain the correct coordinates used in the example with
# index = np.arange((order+1)**2).reshape(
# (order+1, order+1))[3*order//4, 3*order//4]
# print(mesh_pts[:, index])
eval_samples = np.array([[1./np.sqrt(2), 1./np.sqrt(2)]]).T
qoi = model.interpolate(solution, eval_samples)
assert np.allclose(qoi, 0.32071594511)
def test_integrate_2d(self):
order = 4
model = SteadyStateDiffusionEquation2D()
bndry_cond = [0.0, 0.0]
lims = [0., 1., 0., 1.]
model.initialize(order, bndry_cond, lims)
mesh_pts = model.get_collocation_points()
assert np.allclose(
model.integrate(np.sum(mesh_pts**2, axis=0)[:, None]), 2./3.)
order = 4
model = SteadyStateDiffusionEquation2D()
bndry_cond = [0.0, 0.0]
lims = [-1., 1., -1., 1.]
model.initialize(order, bndry_cond, lims)
mesh_pts = model.get_collocation_points()
assert np.allclose(
model.integrate(np.sum(mesh_pts**2, axis=0)[:, None]), 8./3.)
def test_evaluate_gradient_2d(self):
"""
for the PDE ((1+sum(z^2)*x)*u(x)')' = -2, u(0) = 0, u(1) = 1
use model.evaluate_gradient to evaluate the gradient of the QoI
with respect to the random parameter vector z.
The QoI is the intergral of the solution over the entire domain
The adjoint rhs is then just 1.
"""
order = 20
model = SteadyStateDiffusionEquation2D()
lims = [0., 1., 0., 1.]
bndry_cond = [0., 0.]
model.initialize(order, bndry_cond, lims)
model.diffusivity_function = \
lambda x, z: (z[0]**2+z[1]**2)*(x[0]+x[1]) + 1.
model.forcing_function = lambda x, z: 0*x[0]-2
sample = np.random.RandomState(2).uniform(-1, 1, (2, 1))
model.diffusivity_derivs_function = \
lambda x, z, i: np.array([2.*(x[0]+x[1])*z[i]]).T
model.forcing_derivs_function = \
lambda x, z, i: np.array([0.*x[0]]).T
model(sample)
# evaluate_gradient has to be called before any more calls to
# model.solve with different parameters, because we need to
# access self.fwd_solution, which will change with any subsuquent calls
errors = pya.check_gradients(
model, lambda x: model.evaluate_gradient(x[:, 0]), sample)
errors = errors[np.isfinite(errors)]
assert errors.max() > 0.1 and errors.min() <= 4e-6
if __name__ == "__main__":
spectral_diffusion_test_suite = \
unittest.TestLoader().loadTestsFromTestCase(TestSpectralDiffusion2D)
unittest.TextTestRunner(verbosity=2).run(spectral_diffusion_test_suite)
| 2.203125 | 2 |
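test_derivative_matrix above compares against the classic Chebyshev differentiation matrix from Trefethen's Spectral Methods in MATLAB, up to the sign and point-ordering difference noted in its comment. For reference, the standard construction of that matrix, independent of pyapprox, is sketched below; for N = 4 it reproduces the true_matrix values used in the test:

import numpy as np

def cheb(N):
    # Chebyshev differentiation matrix D and points x (Trefethen's cheb.m).
    if N == 0:
        return np.zeros((1, 1)), np.array([1.0])
    x = np.cos(np.pi * np.arange(N + 1) / N)         # Chebyshev points in [-1, 1]
    c = np.hstack([2.0, np.ones(N - 1), 2.0]) * (-1.0) ** np.arange(N + 1)
    X = np.tile(x, (N + 1, 1)).T
    dX = X - X.T
    D = np.outer(c, 1.0 / c) / (dX + np.eye(N + 1))  # off-diagonal entries
    D -= np.diag(D.sum(axis=1))                      # diagonal from row sums
    return D, x

D, x = cheb(4)
print(np.round(D, 4))  # same values as true_matrix in test_derivative_matrix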
torchdrug/layers/flow.py | wconnell/torchdrug | 772 | 6610 | import torch
from torch import nn
from torch.nn import functional as F
from torchdrug import layers
class ConditionalFlow(nn.Module):
"""
Conditional flow transformation from `Masked Autoregressive Flow for Density Estimation`_.
.. _Masked Autoregressive Flow for Density Estimation:
https://arxiv.org/pdf/1705.07057.pdf
Parameters:
input_dim (int): input & output dimension
condition_dim (int): condition dimension
hidden_dims (list of int, optional): hidden dimensions
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, condition_dim, hidden_dims=None, activation="relu"):
super(ConditionalFlow, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(condition_dim, list(hidden_dims) + [input_dim * 2], activation)
self.rescale = nn.Parameter(torch.zeros(1))
def forward(self, input, condition):
"""
Transform data into latent representations.
Parameters:
input (Tensor): input representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): latent representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = (input + bias) * scale.exp()
log_det = scale
return output, log_det
def reverse(self, latent, condition):
"""
Transform latent representations into data.
Parameters:
latent (Tensor): latent representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): input representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = latent / scale.exp() - bias
log_det = scale
return output, log_det | 2.75 | 3 |
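A small usage sketch of the conditional affine flow above: for the same condition, reverse() should invert forward(), since the transform is an elementwise affine map. The dimensions below are illustrative only:

import torch
from torchdrug.layers.flow import ConditionalFlow

flow = ConditionalFlow(input_dim=8, condition_dim=16, hidden_dims=[32])
x = torch.randn(4, 8)
cond = torch.randn(4, 16)

z, log_det = flow(x, cond)          # data -> latent
x_rec, _ = flow.reverse(z, cond)    # latent -> data
print(torch.allclose(x, x_rec, atol=1e-5))  # True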
olctools/accessoryFunctions/metadataprinter.py | lowandrew/OLCTools | 1 | 6611 | <filename>olctools/accessoryFunctions/metadataprinter.py
#!/usr/bin/env python3
import logging
import json
import os
__author__ = 'adamkoziol'
class MetadataPrinter(object):
def printmetadata(self):
# Iterate through each sample in the analysis
for sample in self.metadata:
# Set the name of the json file
jsonfile = os.path.join(sample.general.outputdirectory, '{}_metadata.json'.format(sample.name))
try:
# Open the metadata file to write
with open(jsonfile, 'w') as metadatafile:
# Write the json dump of the object dump to the metadata file
json.dump(sample.dump(), metadatafile, sort_keys=True, indent=4, separators=(',', ': '))
except IOError:
# Print useful information in case of an error
logging.warning('Error creating .json file for {sample}'.format(sample=sample.name))
raise
except TypeError as e:
logging.debug(f'Encountered TypeError writing metadata to file with the following details: {e}')
def __init__(self, inputobject):
try:
self.metadata = inputobject.runmetadata.samples
except AttributeError:
try:
self.metadata = inputobject.metadata.samples
except AttributeError:
try:
self.metadata = inputobject.metadata
except AttributeError:
self.metadata = inputobject.runmetadata
self.printmetadata()
| 2.890625 | 3 |
mindware/estimators.py | aman-gupta-1995/Machine-Learning-Mindware | 27 | 6612 | import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
class Classifier(BaseEstimator):
"""This class implements the classification task. """
def initialize(self, data: DataNode, **kwargs):
if self.metric is None:
self.metric = 'acc'
# Check the task type: {binary, multiclass}
task_type = type_of_target(data.data[1])
if task_type in type_dict:
task_type = type_dict[task_type]
else:
raise ValueError("Invalid Task Type: %s!" % task_type)
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data: DataNode, **kwargs):
"""
Fit the classifier to given training data.
:param data: instance of DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Predict classes for X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def refit(self):
return super().refit()
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""
Predict probabilities of classes for all samples X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples, n_classes]
The predicted class probabilities.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
if self.task_type != MULTILABEL_CLS:
assert (
np.allclose(
np.sum(pred_proba, axis=1),
np.ones_like(pred_proba[:, 0]))
), "Prediction probability does not sum up to 1!"
# Check that all probability values lie between 0 and 1.
assert (
(pred_proba >= 0).all() and (pred_proba <= 1).all()
), "Found prediction probability value outside of [0, 1]!"
return pred_proba
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMClassifier
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMClassifier(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_importance(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
std_array = np.std(_ef, ddof=1, axis=0)
abs_array = abs(_ef)
mean_array = np.mean(abs_array, axis=0)
_importance = std_array / mean_array
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
if (len(set(data.data[1]))) > 2:
print('ERROR! Only binary classification is supported!')
return 0
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
_impact = _ef[0]
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
class Regressor(BaseEstimator):
"""This class implements the regression task. """
def initialize(self, data: DataNode, **kwargs):
self.metric = 'mse' if self.metric is None else self.metric
# Check the task type: {continuous}
task_type = type_dict['continuous']
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data, **kwargs):
"""
Fit the regressor to given training data.
:param data: DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Make predictions for X.
:param X: DataNode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples] or [n_samples, n_labels]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMRegressor
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMRegressor(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LinearRegression
import pandas as pd
X, y = self.data_transformer(data).data
reg = LinearRegression()
reg.fit(X, y)
_impact = reg.coef_
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
| 2.46875 | 2 |
AnimeSpider/spiders/AinmeLinkList.py | xiaowenwen1995/AnimeSpider | 7 | 6613 | # -*- coding: utf-8 -*-
import scrapy
import json
import os
import codecs
from AnimeSpider.items import AnimespiderItem
class AinmelinklistSpider(scrapy.Spider):
name = 'AinmeLinkList'
allowed_domains = ['bilibili.com']
start_urls = ['http://bilibili.com/']
def start_requests(self):
jsonpath = os.path.dirname(__file__) + '/output'
jsonfile = codecs.open('%s/AinmeList_items.json' % jsonpath, 'r', encoding='utf-8')
for line in jsonfile:
ainme = json.loads(line)
ainmename = ainme["name"]
url = ainme["link"].replace("//", "https://")
yield scrapy.Request(url=url, callback=self.parse, meta={'ainmename': ainmename})
def parse(self, response):
item = AnimespiderItem()
item["info_link"] = response.css(".media-title").xpath('@href').get()
yield item
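
# Run sketch (assumed standard Scrapy project layout; the output file name is
# hypothetical). The spider reads output/AinmeList_items.json produced by an
# earlier crawl and then follows each listed link:
#
#   scrapy crawl AinmeLinkList -o output/AinmeLinkList_detail.json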
| 2.8125 | 3 |
Module 1/Chapter 7/prog1.py | PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch- | 3 | 6614 | <reponame>PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch-
import cv2
print(cv2.__version__)
| 1.101563 | 1 |
setup.py | darlenew/pytest-testplan | 0 | 6615 | """Setup for pytest-testplan plugin."""
from setuptools import setup
setup(
name='pytest-testplan',
version='0.1.0',
description='A pytest plugin to generate a CSV test report.',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
py_modules=['pytest_testplan'],
install_requires=['pytest'],
entry_points={'pytest11': ['testplan = pytest_testplan', ]},
)
| 1.289063 | 1 |
examples/industrial_quality_inspection/train_yolov3.py | petr-kalinin/PaddleX | 1 | 6616 | <reponame>petr-kalinin/PaddleX
# Environment variable configuration, used to control whether the GPU is used
# Docs: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from paddlex.det import transforms
import paddlex as pdx
# Download and extract the aluminum surface-defect inspection dataset
aluminum_dataset = 'https://bj.bcebos.com/paddlex/examples/industrial_quality_inspection/datasets/aluminum_inspection.tar.gz'
pdx.utils.download_and_decompress(aluminum_dataset, path='./')
# Define the transforms used for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(),
transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize(
target_size=608, interp='RANDOM'), transforms.RandomHorizontalFlip(),
transforms.Normalize()
])
eval_transforms = transforms.Compose([
transforms.Resize(
target_size=608, interp='CUBIC'), transforms.Normalize()
])
# Define the datasets used for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
train_dataset = pdx.datasets.VOCDetection(
data_dir='aluminum_inspection',
file_list='aluminum_inspection/train_list.txt',
label_list='aluminum_inspection/labels.txt',
transforms=train_transforms,
shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
data_dir='aluminum_inspection',
file_list='aluminum_inspection/val_list.txt',
label_list='aluminum_inspection/labels.txt',
transforms=eval_transforms)
# Initialize the model and start training
# Training metrics can be inspected with VisualDL, see https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
num_classes = len(train_dataset.labels)
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large')
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train
# Parameter descriptions and tuning guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
num_epochs=400,
train_dataset=train_dataset,
train_batch_size=8,
eval_dataset=eval_dataset,
warmup_steps=4000,
learning_rate=0.000125,
lr_decay_epochs=[240, 320],
save_dir='output/yolov3_mobilenetv3',
use_vdl=True)
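
# Inference sketch (assumed PaddleX 1.x API; the image path below is hypothetical):
#
#   model = pdx.load_model('output/yolov3_mobilenetv3/best_model')
#   result = model.predict('aluminum_inspection/JPEGImages/example.jpg')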
| 2.296875 | 2 |
api/migrations/0004_auto_20210107_2032.py | bartoszper/Django-REST-API-movierater | 0 | 6617 | # Generated by Django 3.1.4 on 2021-01-07 19:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20210107_2010'),
]
operations = [
migrations.AlterField(
model_name='extrainfo',
name='rodzaj',
field=models.IntegerField(choices=[(2, 'Sci-Fi'), (0, 'Nieznany'), (5, 'Komedia'), (3, 'Dramat'), (1, 'Horror')], default=0),
),
migrations.CreateModel(
name='Recenzja',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('opis', models.TextField(default='')),
('gwizdki', models.IntegerField(default=5)),
('film', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.film')),
],
),
]
| 1.703125 | 2 |
wooey/migrations/0009_script_versioning.py | macdaliot/Wooey | 1 | 6618 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wooey.models.mixins
class Migration(migrations.Migration):
dependencies = [
('wooey', '0008_short_param_admin'),
]
operations = [
migrations.CreateModel(
name='ScriptVersion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('script_version', models.CharField(default='1', help_text='The script version.', max_length=50, blank=True)),
('script_iteration', models.PositiveSmallIntegerField(default=1)),
('script_path', models.FileField(upload_to=b'')),
('default_version', models.BooleanField(default=False)),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('script', models.ForeignKey(related_name='script_version_new', to='wooey.Script')),
],
bases=(wooey.models.mixins.ModelDiffMixin, wooey.models.mixins.WooeyPy2Mixin, models.Model),
),
migrations.AddField(
model_name='scriptparameter',
name='script_version',
field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
preserve_default=False,
),
migrations.AddField(
model_name='scriptparametergroup',
name='script_version',
field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
preserve_default=False,
),
migrations.AddField(
model_name='wooeyjob',
name='script_version',
field=models.ForeignKey(null=True, to='wooey.ScriptVersion'),
preserve_default=False,
),
]
| 1.765625 | 2 |
vendor/munkireport/firewall/scripts/firewall.py | menamegaly/MR | 0 | 6619 | #!/usr/bin/python
"""
Firewall for munkireport.
By Tuxudo
Will return all details about how the firewall is configured
"""
import subprocess
import os
import sys
import platform
import re
import plistlib
import json
sys.path.insert(0,'/usr/local/munki')
sys.path.insert(0, '/usr/local/munkireport')
from munkilib import FoundationPlist
def get_firewall_info():
'''Uses system profiler to get firewall info for the machine.'''
cmd = ['/usr/sbin/system_profiler', 'SPFirewallDataType', '-xml']
proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, unused_error) = proc.communicate()
try:
plist = plistlib.readPlistFromString(output)
# system_profiler xml is an array
firewall_dict = plist[0]
items = firewall_dict['_items']
return items
except Exception:
return {}
def flatten_firewall_info(array):
    '''Un-nest firewall info, return a dict with the relevant keys.'''
firewall = {}
for obj in array:
for item in obj:
if item == '_items':
                # recurse into nested items and merge their keys into firewall
                firewall.update(flatten_firewall_info(obj['_items']))
elif item == 'spfirewall_services':
for service in obj[item]:
if obj[item][service] == "spfirewall_allow_all":
obj[item][service] = 1
else:
obj[item][service] = 0
firewall['services'] = json.dumps(obj[item])
elif item == 'spfirewall_applications':
for application in obj[item]:
if obj[item][application] == "spfirewall_allow_all":
obj[item][application] = 1
else:
obj[item][application] = 0
firewall['applications'] = json.dumps(obj[item])
return firewall
def get_alf_preferences():
pl = FoundationPlist.readPlist("/Library/Preferences/com.apple.alf.plist")
firewall = {}
for item in pl:
if item == 'allowdownloadsignedenabled':
firewall['allowdownloadsignedenabled'] = to_bool(pl[item])
elif item == 'allowsignedenabled':
firewall['allowsignedenabled'] = to_bool(pl[item])
elif item == 'firewallunload':
firewall['firewallunload'] = to_bool(pl[item])
elif item == 'globalstate':
firewall['globalstate'] = to_bool(pl[item])
elif item == 'stealthenabled':
firewall['stealthenabled'] = to_bool(pl[item])
elif item == 'loggingenabled':
firewall['loggingenabled'] = to_bool(pl[item])
elif item == 'loggingoption':
firewall['loggingoption'] = pl[item]
elif item == 'version':
firewall['version'] = pl[item]
return firewall
def to_bool(s):
if s == True:
return 1
else:
return 0
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def main():
"""Main"""
# Skip manual check
if len(sys.argv) > 1:
if sys.argv[1] == 'manualcheck':
print 'Manual check: skipping'
exit(0)
# Create cache dir if it does not exist
cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# Set the encoding
# The "ugly hack" :P
reload(sys)
sys.setdefaultencoding('utf8')
# Get results
result = dict()
info = get_firewall_info()
result = merge_two_dicts(flatten_firewall_info(info), get_alf_preferences())
# Write firewall results to cache
output_plist = os.path.join(cachedir, 'firewall.plist')
FoundationPlist.writePlist(result, output_plist)
#print FoundationPlist.writePlistToString(result)
if __name__ == "__main__":
main()
| 2.4375 | 2 |
cf_step/metrics.py | dpoulopoulos/cf_step | 25 | 6620 | <reponame>dpoulopoulos/cf_step<gh_stars>10-100
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/metrics.ipynb (unless otherwise specified).
__all__ = ['recall_at_k', 'precision_at_k']
# Cell
from typing import List
# Cell
def recall_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float:
"""Computes `Recall@k` from the given predictions and targets sets."""
predictions_set = set(predictions[:k])
targets_set = set(targets)
result = len(targets_set & predictions_set) / float(len(targets_set))
return result
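
# Illustrative example (not part of the exported module): with
# predictions = [1, 2, 3, 4] and targets = [2, 9],
# recall_at_k(predictions, targets, k=3) == 0.5 because one of the two
# targets appears among the top-3 predictions.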
# Cell
def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float:
"""Computes `Precision@k` from the given predictions and targets sets."""
predictions_set = set(predictions[:k])
targets_set = set(targets)
result = len(targets_set & predictions_set) / float(len(predictions_set))
return result | 2.265625 | 2 |
bicycleparameters/period.py | sandertyu/Simple-Geometry-Plot | 20 | 6621 | <reponame>sandertyu/Simple-Geometry-Plot<filename>bicycleparameters/period.py
#!/usr/bin/env/ python
import os
from math import pi
import numpy as np
from numpy import ma
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from uncertainties import ufloat
# local modules
from .io import load_pendulum_mat_file
def average_rectified_sections(data):
'''Returns a slice of an oscillating data vector based on the max and min
    of the mean of the sections created by rectifying the data.
Parameters
----------
data : ndarray, shape(n,)
Returns
-------
data : ndarray, shape(m,)
A slice where m is typically less than n.
Notes
-----
    This is a function to try to handle the fact that some of the data from the
    torsional pendulum had a beating-like phenomenon, and we only want to select
    a section of the data that doesn't seem to exhibit that phenomenon.
'''
# subtract the mean so that there are zero crossings
meanSubData = data - np.mean(data)
# find the zero crossings
zeroCrossings = np.where(np.diff(np.sign(meanSubData)))[0]
# add a zero to the beginning
crossings = np.concatenate((np.array([0]), zeroCrossings))
    # find the mean value of the rectified sections and the local index
secMean = []
localMeanInd = []
for sec in np.split(np.abs(meanSubData), zeroCrossings):
localMeanInd.append(np.argmax(sec))
secMean.append(np.mean(sec))
meanInd = []
# make the global indices
for i, val in enumerate(crossings):
meanInd.append(val + localMeanInd[i])
    # only take the top part of the data because several zero crossings can
    # cluster at one point, mainly due to the resolution of the daq box
threshold = np.mean(secMean)
secMeanOverThresh = []
indice = []
for i, val in enumerate(secMean):
if val > threshold:
secMeanOverThresh.append(val)
indice.append(meanInd[i])
# now return the data based on the max value and the min value
maxInd = indice[np.argmax(secMeanOverThresh)]
minInd = indice[np.argmin(secMeanOverThresh)]
return data[maxInd:minInd]
def calc_periods_for_files(directory, filenames, forkIsSplit):
'''Calculates the period for all filenames in directory.
Parameters
----------
directory : string
This is the path to the RawData directory.
filenames : list
List of all the mat file names in the RawData directory.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
periods : dictionary
Contains all the periods for the mat files in the RawData directory.
'''
periods = {}
def pathParts(path):
'''Splits a path into a list of its parts.'''
components = []
while True:
(path,tail) = os.path.split(path)
if tail == "":
components.reverse()
return components
components.append(tail)
pathToRawDataParts = pathParts(directory)
pathToRawDataParts.pop()
pathToBicycleDir = os.path.join(pathToRawDataParts[0],
pathToRawDataParts[1],
pathToRawDataParts[2])
pathToPlotDir = os.path.join(pathToBicycleDir, 'Plots', 'PendulumFit')
# make sure there is a place to save the plots
if not os.path.exists(pathToPlotDir):
os.makedirs(pathToPlotDir)
for f in filenames:
print("Calculating the period for:", f)
# load the pendulum data
pathToMatFile = os.path.join(directory, f)
matData = load_pendulum_mat_file(pathToMatFile)
# generate a variable name for this period
periodKey = get_period_key(matData, forkIsSplit)
# calculate the period
sampleRate = get_sample_rate(matData)
pathToPlotFile = os.path.join(pathToPlotDir,
os.path.splitext(f)[0] + '.png')
period = get_period_from_truncated(matData['data'],
sampleRate,
pathToPlotFile)
print("The period is:", period, "\n")
# either append the the period or if it isn't there yet, then
# make a new list
try:
periods[periodKey].append(period)
except KeyError:
periods[periodKey] = [period]
# now average all the periods
for k, v in periods.items():
if k.startswith('T'):
periods[k] = np.mean(v)
return periods
def check_for_period(mp, forkIsSplit):
'''Returns whether the fork is split into two pieces and whether the period
calculations need to happen again.
Parameters
----------
mp : dictionary
Dictionary the measured parameters.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
forcePeriodCalc : boolean
True if there wasn't enough period data in mp, false if there was.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
'''
forcePeriodCalc = False
#Check to see if mp contains at enough periods to not need
# recalculation
ncTSum = 0
ntTSum = 0
for key in mp.keys():
# check for any periods in the keys
if key[:2] == 'Tc':
ncTSum += 1
elif key[:2] == 'Tt':
ntTSum += 1
    # if there isn't enough data then force the period calculations again
if forkIsSplit:
if ncTSum < 5 or ntTSum < 11:
forcePeriodCalc = True
else:
if ncTSum < 4 or ntTSum < 8:
forcePeriodCalc = True
return forcePeriodCalc
def fit_goodness(ym, yp):
'''
Calculate the goodness of fit.
Parameters
----------
ym : ndarray, shape(n,)
The vector of measured values.
    yp : ndarray, shape(n,)
The vector of predicted values.
Returns
-------
rsq : float
The r squared value of the fit.
SSE : float
The error sum of squares.
SST : float
The total sum of squares.
SSR : float
The regression sum of squares.
'''
SSR = np.sum((yp - np.mean(ym))**2)
SST = np.sum((ym - np.mean(ym))**2)
SSE = SST - SSR
rsq = SSR / SST
return rsq, SSE, SST, SSR
def get_period(data, sampleRate, pathToPlotFile):
'''Returns the period and uncertainty for data resembling a decaying
oscillation.
Parameters
----------
data : ndarray, shape(n,)
A time series that resembles a decaying oscillation.
sampleRate : int
The frequency that data was sampled at.
pathToPlotFile : string
A path to the file to print the plots.
Returns
-------
T : ufloat
The period of oscillation and its uncertainty.
'''
y = data
x = np.linspace(0., (len(y) - 1) / float(sampleRate), num=len(y))
def fitfunc(p, t):
'''Decaying oscillation function.'''
a = p[0]
b = np.exp(-p[3] * p[4] * t)
c = p[1] * np.sin(p[4] * np.sqrt(1 - p[3]**2) * t)
d = p[2] * np.cos(p[4] * np.sqrt(1 - p[3]**2) * t)
return a + b * (c + d)
# initial guesses
#p0 = np.array([1.35, -.5, -.75, 0.01, 3.93]) # guess from delft
#p0 = np.array([2.5, -.75, -.75, 0.001, 4.3]) # guess from ucd
p0 = make_guess(data, sampleRate) # tries to make a good guess
# create the error function
errfunc = lambda p, t, y: fitfunc(p, t) - y
# minimize the error function
p1, success = leastsq(errfunc, p0[:], args=(x, y))
lscurve = fitfunc(p1, x)
# find the uncertainty in the fit parameters
rsq, SSE, SST, SSR = fit_goodness(y, lscurve)
sigma = np.sqrt(SSE / (len(y) - len(p0)))
# calculate the jacobian
L = jac_fitfunc(p1, x)
# the Hessian
H = np.dot(L.T, L)
# the covariance matrix
U = sigma**2. * np.linalg.inv(H)
# the standard deviations
sigp = np.sqrt(U.diagonal())
# natural frequency
wo = ufloat(p1[4], sigp[4])
# damping ratio
zeta = ufloat(p1[3], sigp[3])
# damped natural frequency
wd = (1. - zeta**2.)**(1. / 2.) * wo
# damped natural frequency (hz)
fd = wd / 2. / pi
# period
T = 1. / fd
# plot the data and save it to file
fig = plt.figure()
plot_osfit(x, y, lscurve, p1, rsq, T, m=np.max(x), fig=fig)
plt.savefig(pathToPlotFile)
plt.close()
# return the period
return T
def get_period_from_truncated(data, sampleRate, pathToPlotFile):
#dataRec = average_rectified_sections(data)
dataRec = data
dataGood = select_good_data(dataRec, 0.1)
return get_period(dataGood, sampleRate, pathToPlotFile)
def get_period_key(matData, forkIsSplit):
'''Returns a dictionary key for the period entries.
Parameters
----------
matData : dictionary
The data imported from a pendulum mat file.
forkIsSplit : boolean
True if the fork is broken into a handlebar and fork and false if the
fork and handlebar was measured together.
Returns
-------
key : string
A key of the form 'T[pendulum][part][orientation]'. For example, if it
is the frame that was hung as a torsional pendulum at the second
orientation angle then the key would be 'TtB2'.
'''
# set up the subscripting for the period key
subscripts = {'Fwheel': 'F',
'Rwheel': 'R',
'Frame': 'B',
'Flywheel': 'D'}
# the Flywheel is for the gyro bike and it actually represents the front
# wheel and the flywheel as one rigid body. It was easier to measure the
# the inertia this way. So...the to get the actual flywheel inertia, one
# must subtract the inertia of the Fwheel, F, from the Flywheel, D.
if forkIsSplit:
subscripts['Fork'] = 'S'
subscripts['Handlebar'] = 'G'
else:
subscripts['Fork'] = 'H'
try:
subscripts[matData['rod']] = 'P'
except KeyError:
subscripts['Rod'] = 'P'
# used to convert word ordinals to numbers
ordinal = {'First' : '1',
'Second' : '2',
'Third' : '3',
'Fourth' : '4',
'Fifth' : '5',
'Sixth' : '6'}
try:
orienWord = matData['angleOrder']
except:
orienWord = matData['angle']
pend = matData['pendulum'][0].lower()
part = subscripts[matData['part']]
orienNum = ordinal[orienWord]
return 'T' + pend + part + orienNum
def get_sample_rate(matData):
'''Returns the sample rate for the data.'''
if 'ActualRate' in matData.keys():
sampleRate = matData['ActualRate']
else:
sampleRate = matData['sampleRate']
return sampleRate
def jac_fitfunc(p, t):
'''
Calculate the Jacobian of a decaying oscillation function.
Uses the analytical formulations of the partial derivatives.
Parameters
----------
p : the five parameters of the equation
t : time vector
Returns
-------
    jac : The jacobian, the partial of the vector function with respect to the
        parameters vector. An N x 5 matrix where N is the number of time steps.
'''
jac = np.zeros((len(p), len(t)))
e = np.exp(-p[3] * p[4] * t)
dampsq = np.sqrt(1 - p[3]**2)
s = np.sin(dampsq * p[4] * t)
c = np.cos(dampsq * p[4] * t)
jac[0] = np.ones_like(t)
jac[1] = e * s
jac[2] = e * c
jac[3] = (-p[4] * t * e * (p[1] * s + p[2] * c) + e * (-p[1] * p[3] * p[4]
* t / dampsq * c + p[2] * p[3] * p[4] * t / dampsq * s))
jac[4] = (-p[3] * t * e * (p[1] * s + p[2] * c) + e * dampsq * t * (p[1] *
c - p[2] * s))
return jac.T
def make_guess(data, sampleRate):
'''Returns a decent starting point for fitting the decaying oscillation
function.
'''
p = np.zeros(5)
# the first unknown is the shift along the y axis
p[0] = np.mean(data)
# work with the mean subtracted data from now on
data = data - p[0]
# what is the initial slope of the curve
if data[10] > data[0]:
slope = 1
else:
slope = -1
# the second is the amplitude for the sin function
p[1] = slope * np.max(data) / 2
# the third is the amplitude for the cos function
p[2] = slope * np.max(data)
# the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02
p[3] = 0.001
# the fifth is the undamped natural frequency
# first remove the data around zero
dataMasked = ma.masked_inside(data, -0.1, 0.1)
# find the zero crossings
zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0]
# remove redundant crossings
zero = []
for i, v in enumerate(zeroCrossings):
if abs(v - zeroCrossings[i - 1]) > 20:
zero.append(v)
# get the samples per period
samplesPerPeriod = 2*np.mean(np.diff(zero))
# now the frequency
p[4] = (samplesPerPeriod / float(sampleRate) /2. / pi)**-1
if np.isnan(p[4]):
p[4] = 4.
return p
def plot_osfit(t, ym, yf, p, rsq, T, m=None, fig=None):
'''Plot fitted data over the measured
Parameters
----------
t : ndarray (n,)
Measurement time in seconds
ym : ndarray (n,)
The measured voltage
    yf : ndarray (n,)
        The fitted values
    p : ndarray (5,)
        The fit parameters for the decaying oscillation function
rsq : float
The r squared value of y (the fit)
T : float
The period
m : float
The maximum value to plot
Returns
-------
fig : the figure
'''
# figure properties
figwidth = 4. # in inches
goldenMean = (np.sqrt(5) - 1.0) / 2.0
figsize = [figwidth, figwidth * goldenMean]
params = {#'backend': 'ps',
'axes.labelsize': 8,
'axes.titlesize': 8,
'text.fontsize': 8,
'legend.fontsize': 8,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'text.usetex': True,
#'figure.figsize': figsize
}
if fig:
fig = fig
else:
fig = plt.figure(2)
fig.set_size_inches(figsize)
plt.rcParams.update(params)
ax1 = plt.axes([0.125, 0.125, 0.9-0.125, 0.65])
#if m == None:
#end = len(t)
#else:
#end = t[round(m/t[-1]*len(t))]
ax1.plot(t, ym, '.', markersize=2)
plt.plot(t, yf, 'k-')
plt.xlabel('Time [s]')
plt.ylabel('Amplitude [V]')
equation = r'$f(t)={0:1.2f}+e^{{-({3:1.3f})({4:1.1f})t}}\left[{1:1.2f}\sin{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}+{2:1.2f}\cos{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}\right]$'.format(p[0], p[1], p[2], p[3], p[4])
rsquare = '$r^2={0:1.3f}$'.format(rsq)
period = '$T={0} s$'.format(T)
plt.title(equation + '\n' + rsquare + ', ' + period)
plt.legend(['Measured', 'Fit'])
if m is not None:
plt.xlim((0, m))
else:
pass
return fig
def select_good_data(data, percent):
'''Returns a slice of the data from the index at maximum value to the index
at a percent of the maximum value.
Parameters
----------
data : ndarray, shape(1,)
This should be a decaying function.
percent : float
The percent of the maximum to clip.
    This basically snips off the beginning and end of the data so that the
    heavily damped tails are gone, along with any weirdness at the beginning.
'''
meanSub = data - np.mean(data)
maxVal = np.max(np.abs(meanSub))
maxInd = np.argmax(np.abs(meanSub))
for i, v in reversed(list(enumerate(meanSub))):
if v > percent * maxVal:
minInd = i
break
return data[maxInd:minInd]
| 2.875 | 3 |
tectosaur2/analyze.py | tbenthompson/BIE_tutorials | 1 | 6622 | import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
from .global_qbx import global_qbx_self
from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample
def find_dcutoff_refine(kernel, src, tol, plot=False):
# prep step 1: find d_cutoff and d_refine
# The goal is to estimate the error due to the QBX local patch
# The local surface will have singularities at the tips where it is cut off
# These singularities will cause error in the QBX expansion. We want to make
# the local patch large enough that these singularities are irrelevant.
# To isolate the QBX patch cutoff error, we will use a very high upsampling.
# We'll also choose p to be the minimum allowed value since that will result in
# the largest cutoff error. Increasing p will reduce the cutoff error guaranteeing that
# we never need to worry about cutoff error.
density = np.ones_like(src.pts[:, 0]) # np.cos(src.pts[:,0] * src.pts[:,1])
if plot:
plt.figure(figsize=(9, 13))
params = []
d_cutoffs = [1.1, 1.3, 1.6, 2.0]
ps = np.arange(1, 55, 3)
for di, direction in enumerate([-1.0, 1.0]):
baseline = global_qbx_self(kernel, src, p=30, kappa=8, direction=direction)
baseline_v = baseline.dot(density)
# Check that the local qbx method matches the simple global qbx approach when d_cutoff is very large
d_refine_high = 8.0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
local_baseline = kernel.integrate(
src.pts,
src,
d_cutoff=3.0,
tol=1e-20,
max_p=50,
d_refine=d_refine_high,
on_src_direction=direction,
)
local_baseline_v = local_baseline.dot(density)
err = np.max(np.abs(baseline_v - local_baseline_v))
print(err)
assert err < tol / 2
n_qbx_panels = []
drefine_optimal = []
p_for_full_accuracy = []
if plot:
plt.subplot(3, 2, 1 + di)
for i_d, d_cutoff in enumerate(d_cutoffs):
errs = []
for i_p, p in enumerate(ps):
# print(p, d_cutoff)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test, report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p,
on_src_direction=direction,
d_refine=d_refine_high,
return_report=True,
)
testv = test.dot(density)
err = np.max(np.abs(baseline_v - testv))
errs.append(err)
# print(p, err)
if err < tol:
for d_refine_decrease in np.arange(1.0, d_refine_high, 0.25):
refine_test, refine_report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p
+ 10, # Increase p here to have a refinement safety margin
on_src_direction=direction,
d_refine=d_refine_decrease,
return_report=True,
)
refine_testv = refine_test.dot(density)
refine_err = np.max(np.abs(baseline_v - refine_testv))
if refine_err < tol:
drefine_optimal.append(d_refine_decrease)
n_qbx_panels.append(refine_report["n_qbx_panels"])
p_for_full_accuracy.append(p)
break
if len(n_qbx_panels) <= i_d:
print(f"Failed to find parameters for {d_cutoff}")
drefine_optimal.append(1000)
n_qbx_panels.append(1e6)
p_for_full_accuracy.append(1e3)
break
if plot:
print(d_cutoff, errs)
plt.plot(ps[: i_p + 1], np.log10(errs), label=str(d_cutoff))
params.append((direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy))
if plot:
plt.legend()
plt.title("interior" if direction > 0 else "exterior")
plt.xlabel(r"$p_{\textrm{max}}$")
if di == 0:
plt.ylabel(r"$\log_{10}(\textrm{error})$")
plt.yticks(-np.arange(0, 16, 3))
plt.xticks(np.arange(0, 61, 10))
plt.ylim([-15, 0])
plt.subplot(3, 2, 3 + di)
plt.plot(d_cutoffs, np.array(n_qbx_panels) / src.n_pts, "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 8])
if di == 0:
plt.ylabel("QBX panels per point")
plt.subplot(3, 2, 5 + di)
plt.plot(d_cutoffs, np.array(drefine_optimal), "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 6])
if di == 0:
plt.ylabel(r"$d_{\textrm{refine}}$")
if plot:
plt.tight_layout()
plt.show()
total_cost = 0
for i in [0, 1]:
direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy = params[i]
appx_cost = (
np.array(p_for_full_accuracy)
* np.array(n_qbx_panels)
* np.array(drefine_optimal)
)
if plot:
print(direction, appx_cost)
total_cost += appx_cost
if plot:
plt.plot(d_cutoffs, total_cost, "k-o")
plt.show()
best_idx = np.argmin(total_cost)
d_cutoff = d_cutoffs[best_idx]
d_refine = drefine_optimal[best_idx]
return d_cutoff, d_refine
# prep step 2: find the minimum distance at which integrals are computed
# to the required tolerance
def _find_d_up_helper(kernel, nq, max_curvature, start_d, tol, kappa):
t = sp.var("t")
n_panels = 2
while True:
panel_edges = np.linspace(-1, 1, n_panels + 1)
panel_bounds = np.stack((panel_edges[:-1], panel_edges[1:]), axis=1)
circle = panelize_symbolic_surface(
t, sp.cos(sp.pi * t), sp.sin(sp.pi * t), panel_bounds, *gauss_rule(nq)
)
n_panels_new = np.max(circle.panel_length / max_curvature * circle.panel_radius)
if n_panels_new <= n_panels:
break
n_panels = np.ceil(n_panels_new).astype(int)
# print(f"\nusing {n_panels} panels with max_curvature={max_curvature}")
circle_kappa, _ = upsample(circle, kappa)
circle_upsample, interp_mat_upsample = upsample(circle_kappa, 2)
# TODO: Write more about the underlying regularity assumptions!!
# Why is it acceptable to use this test_density here? Empirically, any
# well-resolved density has approximately the same error as integrating sin(x).
# For example, integrating: 1, cos(x)^2.
# If we integrate a poorly resolved density, we do see higher errors.
#
# How poorly resolved does the density need to be in order to see higher error?
# It seems like an interpolation Linfinity error of around 1e-5 causes the d_up value to start to drift upwards.
#
# As a simple heuristic that seems to perform very well, we compute the
# error when integrating a constant and then double the required distance
# in order to account for integrands that are not quite so perfectly
# resolved.
# if assume_regularity:
# omega = 1.0
# else:
# omega = 999.0# / max_curvature
# f = lambda x: np.sin(omega * x)
# test_density = interp_mat_upsample.dot(f(circle.pts[:,0]))
# test_density_upsampled = f(circle_upsample.pts[:,0])
# print('l2 err', np.linalg.norm(test_density - test_density_upsampled) / np.linalg.norm(test_density_upsampled))
# print('linf err', np.max(np.abs(test_density - test_density_upsampled)))
# test_density = f(circle.pts[:,0])
# test_density = np.sin(999 * circle.pts[:,0])
test_density = np.ones(circle_kappa.n_pts)
d_up = 0
for direction in [-1.0, 1.0]:
d = start_d
for i in range(50):
# In actuality, we only need to test interior points because the curvature
# of the surface ensures that more source panels are near the observation
# points and, as a result, the error will be higher for any given value of d.
L = np.repeat(circle_kappa.panel_length, circle_kappa.panel_order)
dist = L * d
test_pts = (
circle_kappa.pts + direction * circle_kappa.normals * dist[:, None]
)
# Check to make sure that the closest distance to a source point is
# truly `dist`. This check might fail if the interior test_pts are
# crossing over into the other half of the circle.
min_src_dist = np.min(
np.linalg.norm((test_pts[:, None] - circle_kappa.pts[None, :]), axis=2),
axis=1,
)
if not np.allclose(min_src_dist, dist):
return False, d
upsample_mat = np.transpose(
apply_interp_mat(
kernel._direct(test_pts, circle_upsample), interp_mat_upsample
),
(0, 2, 1),
)
est_mat = np.transpose(kernel._direct(test_pts, circle_kappa), (0, 2, 1))
# err = np.max(np.abs(upsample_mat - est_mat).sum(axis=2))
err = np.max(
np.abs(upsample_mat.dot(test_density) - est_mat.dot(test_density))
)
# print(d, err)
if err < tol:
d_up = max(d, d_up)
break
d *= 1.2
return True, d_up
def find_d_up(kernel, nq, max_curvature, start_d, tol, kappa):
d = start_d
for i in range(10):
d_up = _find_d_up_helper(kernel, nq, max_curvature * (0.8) ** i, d, tol, kappa)
if d_up[0]:
return d_up[1]
d = d_up[1]
def final_check(kernel, src):
density = np.ones_like(src.pts[:, 0]) # np.cos(source.pts[:,0] * src.pts[:,1])
baseline = global_qbx_self(kernel, src, p=50, kappa=10, direction=1.0)
baseline_v = baseline.dot(density)
tols = 10.0 ** np.arange(0, -15, -1)
errs = []
runtimes = []
for tol in tols:
runs = []
for i in range(10):
start = time.time()
local_baseline, report = kernel.integrate(
src.pts,
src,
tol=tol,
on_src_direction=1.0,
return_report=True,
)
runs.append(time.time() - start)
runtimes.append(np.min(runs))
local_baseline_v = local_baseline.dot(density)
errs.append(np.max(np.abs(baseline_v - local_baseline_v)))
# print(tol, errs[-1], runtime)
# assert(np.max(np.abs(baseline_v-local_baseline_v)) < 5e-14)
plt.figure(figsize=(9, 5))
plt.subplot(1, 2, 1)
plt.plot(-np.log10(tols), np.log10(errs))
plt.subplot(1, 2, 2)
plt.plot(-np.log10(tols), runtimes)
plt.tight_layout()
plt.show()
| 1.8125 | 2 |
celery-getting-started/celeryconfig.py | hustbeta/python-examples | 0 | 6623 | <gh_stars>0
# -*- coding: utf-8 -*-
BROKER_URL = 'amqp://guest@localhost//'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_BACKEND = 'amqp://guest@localhost//'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_ENABLE_UTC = False
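
# Usage sketch (assumed, not part of the original config): a Celery app can load
# this module by name with config_from_object. The module/app names below are
# hypothetical.
#
#   from celery import Celery
#   app = Celery('tasks')
#   app.config_from_object('celeryconfig')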
| 1.515625 | 2 |
smartnlp/utils/basic_log.py | msgi/nlp-tour | 1,559 | 6624 | import logging as log
class Log:
def __init__(self, level):
self.level = level
log.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=level)
self.log = log
def info(self, msg):
self.log.info(msg)
def debug(self, msg):
self.log.debug(msg)
def warn(self, msg):
self.log.warn(msg)
def error(self, msg):
self.log.error(msg)
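
# Usage sketch (assumed): construct a Log with a stdlib logging level and call
# its thin wrappers.
#
#   logger = Log(log.INFO)
#   logger.info('loading model')
#   logger.error('failed to parse input')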
| 3.15625 | 3 |
people/losses-bkp.py | dluvizon/3d-pose-consensus | 5 | 6625 | <gh_stars>1-10
from keras import backend as K  # assumed import: K is used as the Keras backend throughout this module


def structural_loss_dst68j3d(p_pred, v_pred):
v_pred = K.stop_gradient(v_pred)
def getlength(v):
return K.sqrt(K.sum(K.square(v), axis=-1))
"""Arms segments"""
joints_arms = p_pred[:, :, 16:37+1, :]
conf_arms = v_pred[:, :, 16:37+1]
diff_arms_r = joints_arms[:, :, 2:-1:2, :] - joints_arms[:, :, 0:-3:2, :]
diff_arms_l = joints_arms[:, :, 3::2, :] - joints_arms[:, :, 1:-2:2, :]
c2_arms_r = conf_arms[:, :, 2:-1:2] * conf_arms[:, :, 0:-3:2]
c2_arms_l = conf_arms[:, :, 3::2] * conf_arms[:, :, 1:-2:2]
"""Legs segments"""
joints_legs = p_pred[:, :, 48:67+1, :]
conf_legs = v_pred[:, :, 48:67+1]
diff_legs_r = joints_legs[:, :, 2:-1:2, :] - joints_legs[:, :, 0:-3:2, :]
diff_legs_l = joints_legs[:, :, 3::2, :] - joints_legs[:, :, 1:-2:2, :]
c2_legs_r = conf_legs[:, :, 2:-1:2] * conf_legs[:, :, 0:-3:2]
c2_legs_l = conf_legs[:, :, 3::2] * conf_legs[:, :, 1:-2:2]
"""Limbs segments"""
segs_limbs_r = getlength(K.concatenate([diff_arms_r, diff_legs_r], axis=-2))
segs_limbs_l = getlength(K.concatenate([diff_arms_l, diff_legs_l], axis=-2))
c2_limbs_r = K.concatenate([c2_arms_r, c2_legs_r], axis=-1)
c2_limbs_l = K.concatenate([c2_arms_l, c2_legs_l], axis=-1)
len_upperarm_r = K.sum(segs_limbs_r[:, :, 2:5], axis=-1, keepdims=True)
len_upperarm_l = K.sum(segs_limbs_l[:, :, 2:5], axis=-1, keepdims=True)
len_forearm_r = K.sum(segs_limbs_r[:, :, 5:8], axis=-1, keepdims=True)
len_forearm_l = K.sum(segs_limbs_l[:, :, 5:8], axis=-1, keepdims=True)
len_hand_r = K.sum(segs_limbs_r[:, :, 8:10], axis=-1, keepdims=True)
len_hand_l = K.sum(segs_limbs_r[:, :, 8:10], axis=-1, keepdims=True)
c2_upperarm_r = K.sum(c2_limbs_r[:, :, 2:5], axis=-1, keepdims=True)
c2_upperarm_l = K.sum(c2_limbs_l[:, :, 2:5], axis=-1, keepdims=True)
c2_forearm_r = K.sum(c2_limbs_r[:, :, 5:8], axis=-1, keepdims=True)
c2_forearm_l = K.sum(c2_limbs_l[:, :, 5:8], axis=-1, keepdims=True)
c2_hand_r = K.sum(c2_limbs_r[:, :, 8:10], axis=-1, keepdims=True)
c2_hand_l = K.sum(c2_limbs_r[:, :, 8:10], axis=-1, keepdims=True)
len_femur_r = K.sum(K.concatenate([
segs_limbs_r[:, :, 10:11],
segs_limbs_r[:, :, 12:14],
], axis=-1), axis=-1, keepdims=True)
len_femur_l = K.sum(K.concatenate([
segs_limbs_l[:, :, 10:11],
segs_limbs_l[:, :, 12:14],
], axis=-1), axis=-1, keepdims=True)
c2_femur_r = K.sum(K.concatenate([
c2_limbs_r[:, :, 10:11],
c2_limbs_r[:, :, 12:14],
], axis=-1), axis=-1, keepdims=True)
c2_femur_l = K.sum(K.concatenate([
c2_limbs_l[:, :, 10:11],
c2_limbs_l[:, :, 12:14],
], axis=-1), axis=-1, keepdims=True)
len_shin_r = K.sum(segs_limbs_r[:, :, 14:17], axis=-1, keepdims=True)
len_shin_l = K.sum(segs_limbs_l[:, :, 14:17], axis=-1, keepdims=True)
len_feet_r = K.sum(segs_limbs_r[:, :, 17:19], axis=-1, keepdims=True)
len_feet_l = K.sum(segs_limbs_l[:, :, 17:19], axis=-1, keepdims=True)
c2_shin_r = K.sum(c2_limbs_r[:, :, 14:17], axis=-1, keepdims=True)
c2_shin_l = K.sum(c2_limbs_l[:, :, 14:17], axis=-1, keepdims=True)
c2_feet_r = K.sum(c2_limbs_r[:, :, 17:19], axis=-1, keepdims=True)
c2_feet_l = K.sum(c2_limbs_l[:, :, 17:19], axis=-1, keepdims=True)
joints_head = K.concatenate([
p_pred[:, :, 11:11+1, :], p_pred[:, :, 11:11+1, :],
p_pred[:, :, 12:15+1, :],
p_pred[:, :, 8:8+1, :], p_pred[:, :, 8:8+1, :],
p_pred[:, :, 14:15+1, :],
], axis=-2)
conf_head = K.concatenate([
v_pred[:, :, 11:11+1], v_pred[:, :, 11:11+1],
v_pred[:, :, 12:15+1],
v_pred[:, :, 8:8+1], v_pred[:, :, 8:8+1],
v_pred[:, :, 14:15+1],
], axis=-1)
diff_head_r = joints_head[:, :, 2:-1:2, :] - joints_head[:, :, 0:-3:2, :]
diff_head_l = joints_head[:, :, 3::2, :] - joints_head[:, :, 1:-2:2, :]
c2_head_r = conf_head[:, :, 2:-1:2] * conf_head[:, :, 0:-3:2]
c2_head_l = conf_head[:, :, 3::2] * conf_head[:, :, 1:-2:2]
diff_cross_r = K.concatenate([
p_pred[:, :, 3:3+1, :] - p_pred[:, :, 20:20+1, :],
p_pred[:, :, 49:49+1, :] - p_pred[:, :, 3:3+1, :],
], axis=-2)
diff_cross_l = K.concatenate([
p_pred[:, :, 3:3+1, :] - p_pred[:, :, 21:21+1, :],
p_pred[:, :, 48:48+1, :] - p_pred[:, :, 3:3+1, :],
], axis=-2)
diff_spine = K.concatenate([
p_pred[:, :, 0:0+1, :] - p_pred[:, :, 7:7+1, :], # euclidean
p_pred[:, :, 1:7+1, :] - p_pred[:, :, 0:6+1, :], # geodesic
], axis=-2)
segs_spine = getlength(diff_spine)
spine_euclidian = K.stop_gradient(segs_spine[:, :, :1])
len_spine = K.sum(segs_spine[:, :, 1:], axis=-1, keepdims=True)
segs_midhead = getlength(p_pred[:, :, 9:11+1, :] - p_pred[:, :, 8:10+1, :])
len_midhead = K.sum(segs_midhead, axis=-1, keepdims=True)
segs_ears = getlength(K.concatenate([
p_pred[:, :, 12:12+1, :] - p_pred[:, :, 14:14+1, :],
p_pred[:, :, 9:9+1, :] - p_pred[:, :, 12:12+1, :],
p_pred[:, :, 13:13+1, :] - p_pred[:, :, 9:9+1, :],
p_pred[:, :, 15:15+1, :] - p_pred[:, :, 13:13+1, :]
], axis=-2))
len_ears = K.sum(segs_ears, axis=-1, keepdims=True)
len_cross_r = K.sum(getlength(diff_cross_r), axis=-1, keepdims=True)
len_cross_l = K.sum(getlength(diff_cross_l), axis=-1, keepdims=True)
ref_length = K.stop_gradient(
K.clip((len_cross_r + len_cross_l) / 2., 0.1, 1.))
"""Reference lengths based on ground truth poses from Human3.6M:
Spine wrt. ref: 0.715 (0.032 std.)
Spine wrt. euclidean: 1.430 (maximum) (0.046 std.)
MidHead wrt. ref: 0.266 (0.019 std.)
Shoulder wrt. ref: 0.150 (?? std.)
Upper arms wrt. ref: 0.364 (0.019 std.)
Fore arms wrt. ref: 0.326 (0.025 std.)
Hands wrt. ref: 0.155 (0.014 std.)
Femur wrt. ref: 0.721 (0.040 std.)
Shin wrt. ref: 0.549 (0.063 std.)
Feet wrt. ref: 0.294 (0.060 std.)
"""
rules_loss = K.concatenate([
c2_limbs_r * c2_limbs_l * (segs_limbs_r - segs_limbs_l),
len_spine - 0.715 * ref_length,
len_midhead - 0.266 * ref_length,
c2_upperarm_r * (len_upperarm_r - 0.364 * ref_length),
c2_upperarm_l * (len_upperarm_l - 0.364 * ref_length),
c2_forearm_r * (len_forearm_r - 0.326 * ref_length),
c2_forearm_l * (len_forearm_l - 0.326 * ref_length),
c2_hand_r * (len_hand_r - 0.155 * ref_length),
c2_hand_l * (len_hand_l - 0.155 * ref_length),
c2_femur_r * (len_femur_r - 0.721 * ref_length),
c2_femur_l * (len_femur_l - 0.721 * ref_length),
c2_shin_r * (len_shin_r - 0.549 * ref_length),
c2_shin_l * (len_shin_l - 0.549 * ref_length),
c2_feet_r * (len_feet_r - 0.294 * ref_length),
c2_feet_l * (len_feet_l - 0.294 * ref_length),
len_ears - 0.213 * ref_length,
], axis=-1)
rules = K.sum(K.square(rules_loss), axis=-1)
spine_bent = K.squeeze(K.maximum(0., len_spine - 1.430 * spine_euclidian),
axis=-1)
return K.mean(spine_bent + rules, axis=-1)
| 2.015625 | 2 |
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py | Muzammil-khan/Aspose.Email-Python-Dotnet | 5 | 6626 | import aspose.email
from aspose.email.clients.imap import ImapClient
from aspose.email.clients import SecurityOptions
from aspose.email.clients.imap import ImapQueryBuilder
import datetime as dt
def run():
dataDir = ""
#ExStart: FetchEmailMessageFromServer
client = ImapClient("imap.gmail.com", 993, "username", "password")
client.select_folder("Inbox")
builder = ImapQueryBuilder()
builder.subject.contains("Newsletter")
builder.internal_date.on(dt.datetime.now())
query = builder.get_query()
msgsColl = client.list_messages(query)
print("Total Messages fulfilling search criterion: " + str(len(msgsColl)))
#ExEnd: FetchEmailMessageFromServer
if __name__ == '__main__':
run()
| 2.640625 | 3 |
Python.FancyBear/settings.py | 010001111/Vx-Suites | 2 | 6627 | # Server UID
SERVER_UID = 45158729
# Setup Logging system #########################################
#
import os
from FileConsoleLogger import FileConsoleLogger
ServerLogger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3server.log") )
W3Logger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3.log") )
#
# Setup Level 2 Protocol - P2Scheme #########################################
#
from P2Scheme import P2Scheme
P2_URL_TOKEN = '760e25f9eb3124'.decode('hex')
P2_SUBJECT_TOKEN = <KEY>'
P2_DATA_TOKEN = <KEY>'
# P2_DATA_TOKEN = 'd8<PASSWORD>'.decode('hex')
MARK = 'itwm='
B64_JUNK_LEN = 9
BIN_JUNK_LEN = 4
P2_Scheme = P2Scheme(_url_token=P2_URL_TOKEN, _data_token=P2_DATA_TOKEN, _mark=MARK, _subj_token=P2_SUBJECT_TOKEN,\
_b64junk_len=B64_JUNK_LEN, _binary_junk_len=BIN_JUNK_LEN)
#
# Setup Level 3 Protocol - P3Scheme #########################################
#
from P3Scheme import P3Scheme
#
P3_PRIVATE_TOKEN = '<KEY>'.decode('hex')
P3_SERVICE_TOKEN = '<KEY>'.decode('hex')
#
P3_Scheme = P3Scheme(private_token=P3_PRIVATE_TOKEN, service_token=P3_SERVICE_TOKEN)
#
# Setup HTTP checker
#
#from HTTPHeadersChecker import HTTPHeadersChecker
#
#HTTPChecker = HTTPHeadersChecker()
# Setup LocalStorage
#
from FSLocalStorage import FSLocalStorage
LocalStorage = FSLocalStorage()
############################################################
# Initialize Server instance #
#
#from W3Server import W3Server
#MAIN_HANDLER = W3Server(p2_scheme=P2_Scheme, p3_scheme=P3_Scheme, http_checker=HTTPChecker, local_storage=LocalStorage, logger=ServerLogger)
############################################################
# Mail Parameters
POP3_MAIL_IP = 'pop.gmail.com'
POP3_PORT = 995
POP3_ADDR = '<EMAIL>'
POP3_PASS = '<PASSWORD>'
SMTP_MAIL_IP = 'smtp.gmail.com'
SMTP_PORT = 587
SMTP_TO_ADDR = '<EMAIL>'
SMTP_FROM_ADDR = '<EMAIL>'
SMTP_PASS = '<PASSWORD>'
# C&C Parametrs
#
XAS_IP = '172.16.31.10'
XAS_GATE = '/updates/'
############################################################
# Setup P3 communication
# wsgi2
#
LS_TIMEOUT = 1 # big loop timeout
FILES_PER_ITER = 5 # count of requests per iter
############################################################
| 1.8125 | 2 |
tools/fileinfo/features/certificates-info/test.py | HoundThe/retdec-regression-tests | 0 | 6628 | <gh_stars>0
from regression_tests import *
class Test1(Test):
settings = TestSettings(
tool='fileinfo',
input='8b280f2b7788520de214fa8d6ea32a30ebb2a51038381448939530fd0f7dfc16',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert len(first_sig['allCertificates']) == 5
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == 'F6B86E97AEB3E567F58901F799E18FC6F89CC92E'
assert first_sig['signedDigest'] == 'F6B86E97AEB3E567F58901F799E18FC6F89CC92E'
assert first_sig['programName'] == "Broadband Download, Thunder in a Flash!"
assert first_sig['allCertificates'][0]['subject'] == "CN=Symantec Time Stamping Services CA - G2,O=Symantec Corporation,C=US"
assert first_sig['allCertificates'][0]['issuer'] == "CN=Thawte Timestamping CA,OU=Thawte Certification,O=Thawte,L=Durbanville,ST=Western Cape,C=ZA"
assert first_sig['allCertificates'][0]['subjectOneline'] == "/C=US/O=Symantec Corporation/CN=Symantec Time Stamping Services CA - G2"
assert first_sig['allCertificates'][0]['issuerOneline'] == "/C=ZA/ST=Western Cape/L=Durbanville/O=Thawte/OU=Thawte Certification/CN=Thawte Timestamping CA"
assert first_sig['allCertificates'][0]['serialNumber'] == "7e:93:eb:fb:7c:c6:4e:59:ea:4b:9a:77:d4:06:fc:3b"
assert first_sig['allCertificates'][0]['publicKeyAlgorithm'] == "rsaEncryption"
assert first_sig['allCertificates'][0]['signatureAlgorithm'] == "sha1WithRSAEncryption"
assert first_sig['allCertificates'][0]['validSince'] == "Dec 21 00:00:00 2012 GMT"
assert first_sig['allCertificates'][0]['validUntil'] == "Dec 30 23:59:59 2020 GMT"
assert first_sig['allCertificates'][0]['sha1'] == "6C07453FFDDA08B83707C09B82FB3D15F35336B1"
assert first_sig['allCertificates'][0]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
assert first_sig['allCertificates'][0]['publicKey'] == (
'<KEY>'
'xwSCtgleZEiVypv3LgmxENza8K/LlBa+xTCdo5DASVDtKHiRfTot3vDdMwi17SUAAL3Te2/tLdEJGvNX0U70UTOQxJzF4KLabQry5kerHIbJk'
'1xH7Ex3ftRYQJTpqr1SSwFeEWlL4nO55nn/oziVz89xpLcSvh7M+R5CvvwdYhBnP/FA1GZqtdsn5Nph2Upg4XCYBTEyMk7FNrAgfAfDXTekiK'
'ryvf7dHwn5vdKG3+nw54trorqpuaqJxZ9YfeYcRG84lChS+Vd+uUOpyyfqmUg09iW6Mh8pU5IRP8Z4kQHkgvXaISAXWp4ZEXNYEZ+VMETfMV58cnBcQIDAQAB')
attributes = first_sig['allCertificates'][0]['attributes']
assert attributes['subject']['country'] == "US"
assert attributes['subject']['organization'] == "Symantec Corporation"
assert attributes['subject']['commonName'] == "Symantec Time Stamping Services CA - G2"
assert attributes['issuer']['country'] == "ZA"
assert attributes['issuer']['organization'] == "Thawte"
assert attributes['issuer']['organizationalUnit'] == "Thawte Certification"
assert attributes['issuer']['state'] == "Western Cape"
assert attributes['issuer']['commonName'] == "Thawte Timestamping CA"
assert attributes['issuer']['locality'] == "Durbanville"
assert first_sig['allCertificates'][1]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig['allCertificates'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
assert first_sig['allCertificates'][3]['sha256'] == "8FB47562286677514075BC38D1CFD2B73481D93CB3F9C23F9AC3E6414EF34A6F"
assert first_sig['allCertificates'][4]['sha256'] == "582DC1D97A790EF04FE2567B1EC88C26B03BF6E99937CAE6A0B50397AD20BBF8"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "96D052BD1B13E983FC6FE41911F6B49CEB5961B9"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
assert first_sig_signer['chain'][0]['sha256'] == "8FB47562286677514075BC38D1CFD2B73481D93CB3F9C23F9AC3E6414EF34A6F"
assert first_sig_signer['chain'][1]['sha256'] == "582DC1D97A790EF04FE2567B1EC88C26B03BF6E99937CAE6A0B50397AD20BBF8"
assert first_sig_signer['chain'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Jun 25 14:19:05 2016 GMT"
assert first_sig_countersig['digest'] == '8F22E222461E03492E8D67948463100465B1B9D0'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
assert first_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '9FC3902927BFEDA2A3F61D650B0D2CBEC6D84597989EA6244D4EF954C67CA0B3'
assert second_sig['signedDigest'] == '9FC3902927BFEDA2A3F61D650B0D2CBEC6D84597989EA6244D4EF954C67CA0B3'
assert second_sig['programName'] == "Broadband Download, Thunder in a Flash!"
assert len(second_sig['allCertificates']) == 6
assert second_sig['allCertificates'][0]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
assert second_sig['allCertificates'][1]['sha256'] == "8FB47562286677514075BC38D1CFD2B73481D93CB3F9C23F9AC3E6414EF34A6F"
assert second_sig['allCertificates'][2]['sha256'] == "582DC1D97A790EF04FE2567B1EC88C26B03BF6E99937CAE6A0B50397AD20BBF8"
assert second_sig['allCertificates'][3]['sha256'] == "43CE166BC567F9887D650A2E624473BE7A43A6F378ABE03CB32FA63F7ABB1E45"
assert second_sig['allCertificates'][4]['sha256'] == "6B6C1E01F590F5AFC5FCF85CD0B9396884048659FC2C6D1170D68B045216C3FD"
assert second_sig['allCertificates'][5]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "E421C1A7625B9CD410B64A0EBEA7D991EA1DBAC65A3404A227235E1C0AB781F1"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 3
assert second_sig_signer['chain'][0]['sha256'] == "8FB47562286677514075BC38D1CFD2B73481D93CB3F9C23F9AC3E6414EF34A6F"
assert second_sig_signer['chain'][1]['sha256'] == "582DC1D97A790EF04FE2567B1EC88C26B03BF6E99937CAE6A0B50397AD20BBF8"
assert second_sig_signer['chain'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 0
assert second_sig_countersig['signTime'] == "Jun 25 14:19:29 2016 GMT"
assert second_sig_countersig['digest'] == 'B36785DD22C1E070DB8A198A16C81BD93FB87F4D5B6301ACB2656C23E4EF80F5'
assert second_sig_countersig['digestAlgorithm'] == 'sha256'
assert len(second_sig_countersig['chain']) == 3
assert second_sig_countersig['chain'][0]['sha256'] == "43CE166BC567F9887D650A2E624473BE7A43A6F378ABE03CB32FA63F7ABB1E45"
assert second_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
assert second_sig_countersig['chain'][2]['sha256'] == "6B6C1E01F590F5AFC5FCF85CD0B9396884048659FC2C6D1170D68B045216C3FD"
class Test2(Test):
settings = TestSettings(
tool='fileinfo',
input='avgcfgex.ex',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert len(first_sig['allCertificates']) == 5
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '3E7B33AB316770BD369BFADF5FB5354730C89991'
assert first_sig['signedDigest'] == '3E7B33AB316770BD369BFADF5FB5354730C89991'
assert first_sig['allCertificates'][0]['subject'] == "CN=Symantec Time Stamping Services CA - G2,O=Symantec Corporation,C=US"
assert first_sig['allCertificates'][0]['issuer'] == "CN=Thawte Timestamping CA,OU=Thawte Certification,O=Thawte,L=Durbanville,ST=Western Cape,C=ZA"
assert first_sig['allCertificates'][0]['serialNumber'] == "7e:93:eb:fb:7c:c6:4e:59:ea:4b:9a:77:d4:06:fc:3b"
assert first_sig['allCertificates'][0]['publicKeyAlgorithm'] == "rsaEncryption"
assert first_sig['allCertificates'][0]['signatureAlgorithm'] == "sha1WithRSAEncryption"
assert first_sig['allCertificates'][0]['validSince'] == "Dec 21 00:00:00 2012 GMT"
assert first_sig['allCertificates'][0]['validUntil'] == "Dec 30 23:59:59 2020 GMT"
assert first_sig['allCertificates'][0]['sha1'] == "6C07453FFDDA08B83707C09B82FB3D15F35336B1"
assert first_sig['allCertificates'][0]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
assert first_sig['allCertificates'][0]['publicKey'] == (
'<KEY>'
'xwSCtgleZEiVypv3LgmxENza8K/LlBa+xTCdo5DASVDtKHiRfTot3vDdMwi17SUAAL3Te2/tLdEJGvNX0U70UTOQxJzF4KLabQry5kerHIbJk'
'1xH7Ex3ftRYQJTpqr1SSwFeEWlL4nO55nn/oziVz89xpLcSvh7M+R5CvvwdYhBnP/FA1GZqtdsn5Nph2Upg4XCYBTEyMk7FNrAgfAfDXTekiK'
'ryvf7dHwn5vdKG3+nw54trorqpuaqJxZ9YfeYcRG84lChS+Vd+uUOpyyfqmUg09iW6Mh8pU5IRP8Z4kQHkgvXaISAXWp4ZEXNYEZ+VMETfMV58cnBcQIDAQAB')
attributes = first_sig['allCertificates'][0]['attributes']
assert attributes['subject']['country'] == "US"
assert attributes['subject']['organization'] == "Symantec Corporation"
assert attributes['subject']['commonName'] == "Symantec Time Stamping Services CA - G2"
assert attributes['issuer']['country'] == "ZA"
assert attributes['issuer']['organization'] == "Thawte"
assert attributes['issuer']['organizationalUnit'] == "Thawte Certification"
assert attributes['issuer']['state'] == "Western Cape"
assert attributes['issuer']['commonName'] == "Thawte Timestamping CA"
assert attributes['issuer']['locality'] == "Durbanville"
assert first_sig['allCertificates'][1]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig['allCertificates'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
assert first_sig['allCertificates'][3]['sha256'] == "3B0ABE047D7E84F3BBD12B5E399BED55E4D7E9FCC3F629B8953A8C060EF6D746"
assert first_sig['allCertificates'][4]['sha256'] == "0CFC19DB681B014BFE3F23CB3A78B67208B4E3D8D7B6A7B1807F7CD6ECB2A54E"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "229A2D7B4C8F2E8EC5B6943D0F0E53B9F59E33B5"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
assert first_sig_signer['chain'][0]['sha256'] == "3B0ABE047D7E84F3BBD12B5E399BED55E4D7E9FCC3F629B8953A8C060EF6D746"
assert first_sig_signer['chain'][1]['sha256'] == "0CFC19DB681B014BFE3F23CB3A78B67208B4E3D8D7B6A7B1807F7CD6ECB2A54E"
assert first_sig_signer['chain'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Feb 1 14:02:52 2016 GMT"
assert first_sig_countersig['digest'] == '0DAAC35A77C75EAEA723AE13E61C927F676080A2'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
assert first_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '6BE0FA5AB9336DDCC6ACE35ED2BC9744860E80088F35E5D77AF254F246228CDE'
assert second_sig['signedDigest'] == '6BE0FA5AB9336DDCC6ACE35ED2BC9744860E80088F35E5D77AF254F246228CDE'
assert len(second_sig['allCertificates']) == 6
assert second_sig['allCertificates'][0]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
assert second_sig['allCertificates'][1]['sha256'] == "3A0682AB7FB478BA82FD11CE4DB9B0ADEA55DA05558A0CF737453D51572163D0"
assert second_sig['allCertificates'][2]['sha256'] == "0CFC19DB681B014BFE3F23CB3A78B67208B4E3D8D7B6A7B1807F7CD6ECB2A54E"
assert second_sig['allCertificates'][3]['sha256'] == "43CE166BC567F9887D650A2E624473BE7A43A6F378ABE03CB32FA63F7ABB1E45"
assert second_sig['allCertificates'][4]['sha256'] == "6B6C1E01F590F5AFC5FCF85CD0B9396884048659FC2C6D1170D68B045216C3FD"
assert second_sig['allCertificates'][5]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "0183B70327A59E8006B666E908D798CCD309BC4C2FFFD10E551E040B9B1DC449"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 3
assert second_sig_signer['chain'][0]['sha256'] == "3A0682AB7FB478BA82FD11CE4DB9B0ADEA55DA05558A0CF737453D51572163D0"
assert second_sig_signer['chain'][1]['sha256'] == "0CFC19DB681B014BFE3F23CB3A78B67208B4E3D8D7B6A7B1807F7CD6ECB2A54E"
assert second_sig_signer['chain'][2]['sha256'] == "8420DFBE376F414BF4C0A81E6936D24CCC03F304835B86C7A39142FCA723A689"
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 0
assert second_sig_countersig['signTime'] == "Feb 1 14:02:54 2016 GMT"
assert second_sig_countersig['digest'] == '1C5206936E053F3D79A046D0E359FB32926AA9D8C269812A80A188AE04DC3E34'
assert second_sig_countersig['digestAlgorithm'] == 'sha256'
assert len(second_sig_countersig['chain']) == 3
assert second_sig_countersig['chain'][0]['sha256'] == "43CE166BC567F9887D650A2E624473BE7A43A6F378ABE03CB32FA63F7ABB1E45"
assert second_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
assert second_sig_countersig['chain'][2]['sha256'] == "6B6C1E01F590F5AFC5FCF85CD0B9396884048659FC2C6D1170D68B045216C3FD"
class Test3(Test):
settings = TestSettings(
tool='fileinfo',
input='c339b87d932b3f86c298b1745db1a28b1214fb7635ba3805851ef8699290f9b8',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '0C13D3C2B3C6F48FA3485B36E08AC822C579C1E0'
assert first_sig['signedDigest'] == '0C13D3C2B3C6F48FA3485B36E08AC822C579C1E0'
        # Two of the certificates are indeed stored twice; confirmed with LIEF
assert len(first_sig['allCertificates']) == 7
assert first_sig['allCertificates'][0]['sha256'] == "FCB433D6D1AFBEC9E8F5447C2C0FA4AE7553986D5C2703BE82524BE608F35F61"
assert first_sig['allCertificates'][1]['sha256'] == "53793CFC1B2B5096CC4EDBEC527ABC5CBC20470C788162D9E54C370D51625F4A"
assert first_sig['allCertificates'][2]['sha256'] == "C766A9BEF2D4071C863A31AA4920E813B2D198608CB7B7CFE21143B836DF09EA"
assert first_sig['allCertificates'][3]['sha256'] == "53793CFC1B2B5096CC4EDBEC527ABC5CBC20470C788162D9E54C370D51625F4A"
assert first_sig['allCertificates'][4]['sha256'] == "C766A9BEF2D4071C863A31AA4920E813B2D198608CB7B7CFE21143B836DF09EA"
assert first_sig['allCertificates'][5]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig['allCertificates'][6]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "AC1E29C0611678FA7E5B98A11106A1F9D69B224F"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
assert first_sig_signer['chain'][0]['sha256'] == "FCB433D6D1AFBEC9E8F5447C2C0FA4AE7553986D5C2703BE82524BE608F35F61"
assert first_sig_signer['chain'][1]['sha256'] == "53793CFC1B2B5096CC4EDBEC527ABC5CBC20470C788162D9E54C370D51625F4A"
assert first_sig_signer['chain'][2]['sha256'] == "C766A9BEF2D4071C863A31AA4920E813B2D198608CB7B7CFE21143B836DF09EA"
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Feb 1 15:47:14 2016 GMT"
assert first_sig_countersig['digest'] == 'DE8E927CEC0175F4544CAFBBAC55D584DAE15C20'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
assert first_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '838379A390118A6562F3E06BE818F5A6407FD7F4FEA9ADF4C36A8B6952B1336B'
assert second_sig['signedDigest'] == '838379A390118A6562F3E06BE818F5A6407FD7F4FEA9ADF4C36A8B6952B1336B'
assert len(second_sig['allCertificates']) == 8
assert second_sig['allCertificates'][0]['sha256'] == "D09EDDF7DA800BCC3AC114852614124706D94EA473A98DB19BC4F4CB6AEE16A4"
assert second_sig['allCertificates'][1]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig['allCertificates'][2]['sha256'] == "4F32D5DC00F715250ABCC486511E37F501A899DEB3BF7EA8ADBBD3AEF1C412DA"
assert second_sig['allCertificates'][3]['sha256'] == "687FA451382278FFF0C8B11F8D43D576671C6EB2BCEAB413FB83D965D06D2FF2"
assert second_sig['allCertificates'][4]['sha256'] == "52F0E1C4E58EC629291B60317F074671B85D7EA80D5B07273463534B32B40234"
assert second_sig['allCertificates'][5]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig['allCertificates'][6]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert second_sig['allCertificates'][7]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "FB26A5FA064C2789EEE8560B4F8A82B7FE968B0D776CE02F52AA3BA11D8CB22C"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 3
assert second_sig_signer['chain'][0]['sha256'] == "D09EDDF7DA800BCC3AC114852614124706D94EA473A98DB19BC4F4CB6AEE16A4"
assert second_sig_signer['chain'][1]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig_signer['chain'][2]['sha256'] == "52F0E1C4E58EC629291B60317F074671B85D7EA80D5B07273463534B32B40234"
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 0
assert second_sig_countersig['signTime'] == "Feb 1 15:47:15 2016 GMT"
assert second_sig_countersig['digest'] == 'C361F36F13601CEAF01F3480C58F98660205981A'
assert second_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(second_sig_countersig['chain']) == 2
assert second_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert second_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
class Test4(Test):
settings = TestSettings(
tool='fileinfo',
input='c58e6118bbe12d2c56b2db014c4eb0d3fd32cde7bca1f32a2da8169be1301e23',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == 'F9D74771FD4A1A2233D266F1F73B53464328EE1E'
assert first_sig['signedDigest'] == 'F9D74771FD4A1A2233D266F1F73B53464328EE1E'
assert first_sig['programName'] == 'Alveo'
assert len(first_sig['allCertificates']) == 5
assert first_sig['allCertificates'][0]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
assert first_sig['allCertificates'][1]['sha256'] == "3A2FBE92891E57FE05D57087F48E730F17E5A5F53EF403D618E5B74D7A7E6ECB"
assert first_sig['allCertificates'][2]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig['allCertificates'][3]['sha256'] == "973A41276FFD01E027A2AAD49E34C37846D3E976FF6A620B6712E33832041AA6"
assert first_sig['allCertificates'][4]['sha256'] == "E2DBA399BE32992B74DF8A86CFD9886C2304CCC19DA8A9BE2B87809DA006379E"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "C2072238EB76B1C42F366FD72B85304A88AE5037"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
assert first_sig_signer['chain'][0]['sha256'] == "E2DBA399BE32992B74DF8A86CFD9886C2304CCC19DA8A9BE2B87809DA006379E"
assert first_sig_signer['chain'][1]['sha256'] == "973A41276FFD01E027A2AAD49E34C37846D3E976FF6A620B6712E33832041AA6"
assert first_sig_signer['chain'][2]['sha256'] == "3A2FBE92891E57FE05D57087F48E730F17E5A5F53EF403D618E5B74D7A7E6ECB"
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Jul 1 20:02:53 2016 GMT"
assert first_sig_countersig['digest'] == 'BFFD2E4E2707EE7BF5EB9B1381F100771CCCCD45'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
assert first_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
class Test5(Test):
settings = TestSettings(
tool='fileinfo',
input='crashreporter.ex',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '65901089C84EF122BE9397F508580A3EFC674D1D'
assert first_sig['signedDigest'] == '65901089C84EF122BE9397F508580A3EFC674D1D'
assert len(first_sig['allCertificates']) == 5
assert first_sig['allCertificates'][0]['sha1'] == "0563B8630D62D75ABBC8AB1E4BDFB5A899B24D43"
assert first_sig['allCertificates'][1]['sha1'] == "92C1588E85AF2201CE7915E8538B492F605B80C6"
assert first_sig['allCertificates'][2]['sha1'] == "50600FD631998451C8F75EF3F618E31FC74D1585"
assert first_sig['allCertificates'][3]['sha1'] == "65439929B67973EB192D6FF243E6767ADF0834E4"
assert first_sig['allCertificates'][4]['sha1'] == "6C07453FFDDA08B83707C09B82FB3D15F35336B1"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "21C4C8CCB2A4B1A878D8347D5F07B8BE4A44693E"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
assert first_sig_signer['chain'][0]['sha256'] == "1A73BF16814D061CF5930634FBBD8A55E53DF2A556469C48FDF2623DFEEEE8A8"
assert first_sig_signer['chain'][1]['sha256'] == "51044706BD237B91B89B781337E6D62656C69F0FCFFBE8E43741367948127862"
assert first_sig_signer['chain'][2]['sha256'] == "3E9099B5015E8F486C00BCEA9D111EE721FABA355A89BCF1DF69561E3DC6325C"
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Jan 24 02:14:31 2016 GMT"
assert first_sig_countersig['digest'] == 'F5D8409366948F3B1185F0D7032759C5A1E2FAF5'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
assert first_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert first_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
class Test6(Test):
settings = TestSettings(
tool='fileinfo',
input='f77acb4e1523b882f5307864345e5f7d20a657a7f40863bd7ae41d2521703fec',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == 'A6BE6C062A26427A722571FD634838DD2FE3743D'
assert first_sig['signedDigest'] == 'A6BE6C062A26427A722571FD634838DD2FE3743D'
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert len(first_sig['allCertificates']) == 7
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "5370C469214E0A599238F7FA851BD86E633FB4E2"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert first_sig_countersig['signTime'] == "Feb 1 14:55:04 2016 GMT"
assert first_sig_countersig['digest'] == 'B49D4C25284D735D3DCD7B3BBCE6FDA6828F774E'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['warnings']) == 0
assert len(first_sig_countersig['chain']) == 2
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '54227373068BB3F2721F0E9B849142F3B68FDDD43571A9327C3F9CA44420EEA8'
assert second_sig['signedDigest'] == '54227373068BB3F2721F0E9B849142F3B68FDDD43571A9327C3F9CA44420EEA8'
assert len(second_sig['allCertificates']) == 8
assert second_sig['allCertificates'][0]['sha256'] == "D09EDDF7DA800BCC3AC114852614124706D94EA473A98DB19BC4F4CB6AEE16A4"
assert second_sig['allCertificates'][1]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig['allCertificates'][2]['sha256'] == "4F32D5DC00F715250ABCC486511E37F501A899DEB3BF7EA8ADBBD3AEF1C412DA"
assert second_sig['allCertificates'][3]['sha256'] == "687FA451382278FFF0C8B11F8D43D576671C6EB2BCEAB413FB83D965D06D2FF2"
assert second_sig['allCertificates'][4]['sha256'] == "52F0E1C4E58EC629291B60317F074671B85D7EA80D5B07273463534B32B40234"
assert second_sig['allCertificates'][5]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig['allCertificates'][6]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert second_sig['allCertificates'][7]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "400EAD9ABBA5A18062E513E78DB4E7535A81F2B250C9E35A50D158DFA82CFD45"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 3
assert second_sig_signer['chain'][0]['sha256'] == "D09EDDF7DA800BCC3AC114852614124706D94EA473A98DB19BC4F4CB6AEE16A4"
assert second_sig_signer['chain'][1]['sha256'] == "5E6D2F88F617DC8B809AEE712445A41B3CDE26AF874A221A9DC98EA1DC68E3D5"
assert second_sig_signer['chain'][2]['sha256'] == "52F0E1C4E58EC629291B60317F074671B85D7EA80D5B07273463534B32B40234"
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 0
assert second_sig_countersig['signTime'] == "Feb 1 14:55:06 2016 GMT"
assert second_sig_countersig['digest'] == '46A76769C69B78945E9B12594F638A943017F26E'
assert second_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(second_sig_countersig['chain']) == 2
assert second_sig_countersig['chain'][0]['sha256'] == "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0"
assert second_sig_countersig['chain'][1]['sha256'] == "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95"
class Test7(Test):
settings = TestSettings(
tool='fileinfo',
input='msenvmnu.dll',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '798D33E74F6F28A62A336C61CF81AE0277F47516'
assert first_sig['signedDigest'] == '798D33E74F6F28A62A336C61CF81AE0277F47516'
assert first_sig['programName'] == 'msenvmnu.dll'
assert len(first_sig['allCertificates']) == 4
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "BC70A3256BE34E5FBB8874E3E6D58664F3F27BE5"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 2
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert first_sig_countersig['signTime'] == "Jul 7 07:30:56 2015 GMT"
assert first_sig_countersig['digest'] == '7F95DBB284EFE07428573201F47342592CA9E007'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['warnings']) == 0
assert len(first_sig_countersig['chain']) == 2
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '5BFB3AB09F359E11D76D95640BACB3A6CD65F2EF0D1763DC47D0B7F7203D22B7'
assert second_sig['signedDigest'] == '5BFB3AB09F359E11D76D95640BACB3A6CD65F2EF0D1763DC47D0B7F7203D22B7'
assert first_sig['programName'] == 'msenvmnu.dll'
assert len(second_sig['allCertificates']) == 2
assert second_sig['allCertificates'][0]['sha1'] == "76DAF3E30F95B244CA4D6107E0243BB97F7DF965"
assert second_sig['allCertificates'][1]['sha1'] == "F252E794FE438E35ACE6E53762C0A234A2C52135"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "2B80E8B619EDC847B62A8A58785C70830B10ACA6863FE30C590F5AE4034258E9"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 2
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 1
assert second_sig_countersig['warnings'][0] == "Couldn't parse signature"
class Test8(Test):
settings = TestSettings(
tool='fileinfo',
input='PdfConv_32.dll',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '714A802FB13B89160538890320E519F7A9260E84'
assert first_sig['signedDigest'] == '714A802FB13B89160538890320E519F7A9260E84'
assert len(first_sig['allCertificates']) == 4
assert first_sig['allCertificates'][0]['sha1'] == "DF946A5E503015777FD22F46B5624ECD27BEE376"
assert first_sig['allCertificates'][1]['sha1'] == "DF540F8FEDBA6454E039DD5E21B3B7C99E327B51"
assert first_sig['allCertificates'][2]['sha1'] == "F5AD0BCC1AD56CD150725B1C866C30AD92EF21B0"
assert first_sig['allCertificates'][3]['sha1'] == "B69E752BBE88B4458200A7C0F4F5B3CCE6F35B47"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "807D00A61C50095D308F33F29EDD644A06E5C514"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Aug 14 07:58:15 2015 GMT"
assert first_sig_countersig['digest'] == 'FDD38655C08F04B887C4992656CD4F35DE6E6A07'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 1
assert first_sig_countersig['chain'][0]['sha256'] == "12F0A1DDF83D265B205B4F3BCA43B3FA89A748E9834EC24004774FD2FDE34073"
class Test9(Test):
settings = TestSettings(
tool='fileinfo',
input='thunderbird.ex',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '0813562802948CCB60D288A84147671FBFC10CD4'
assert first_sig['signedDigest'] == '0813562802948CCB60D288A84147671FBFC10CD4'
assert len(first_sig['allCertificates']) == 5
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "A6549FE9A61275AD574F53D2A299138E534780E6"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert len(first_sig_countersig['warnings']) == 0
assert first_sig_countersig['signTime'] == "Feb 11 22:09:49 2016 GMT"
assert first_sig_countersig['digest'] == 'BEFD25FA1E19A6D90B1918D4E06E465FE3BC57E3'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['chain']) == 2
class Test10(Test):
settings = TestSettings(
tool='fileinfo',
input='VSTST-FileConverter.ex',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == '427DC17A763807D2DEAD406DDFD3AAE93F5CE235'
assert first_sig['signedDigest'] == '427DC17A763807D2DEAD406DDFD3AAE93F5CE235'
assert first_sig['programName'] == 'VSTST-FileConverter.exe'
assert first_sig['signatureVerified'] == True
assert len(first_sig['warnings']) == 0
assert len(first_sig['allCertificates']) == 4
assert first_sig['allCertificates'][0]['sha256'] == "E43F82BC40029F17DBB516613D1E1A96EC2940CE76E0A9CD5F53BA50175A8766"
assert first_sig['allCertificates'][1]['sha256'] == "67C529AD57B2AEDD4D248993324270C7064D4F6BDAAF70044D772D05C56001A4"
assert first_sig['allCertificates'][2]['sha256'] == "9CBF22FAE0DD53A7395556CE6154AA14A0D03360AA8C51CFEA05D1FD8819E043"
assert first_sig['allCertificates'][3]['sha256'] == "4F987BBE4E0D1DCF48FCEFC9239AC6E62EE9DF38CAC2D32993B8533CD95C2E49"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "C66CA59AF0B63A5758EC97F74FA33C686DBD06D0"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 2
first_sig_countersig = first_sig_signer['counterSigners'][0]
assert first_sig_countersig['signTime'] == "Jul 7 07:34:43 2015 GMT"
assert first_sig_countersig['digest'] == 'C29360ED776638FE506A2641A5F13A9975EA9945'
assert first_sig_countersig['digestAlgorithm'] == 'sha1'
assert len(first_sig_countersig['warnings']) == 0
assert len(first_sig_countersig['chain']) == 2
second_sig = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_sig['signatureVerified'] == True
assert len(second_sig['warnings']) == 0
assert second_sig['digestAlgorithm'] == 'sha256'
assert second_sig['fileDigest'] == '7E6B06384FF2B27537F0AC76E311C116434D02DBC735FAF113B6EFD6D629F74C'
assert second_sig['signedDigest'] == '7E6B06384FF2B27537F0AC76E311C116434D02DBC735FAF113B6EFD6D629F74C'
assert first_sig['programName'] == 'VSTST-FileConverter.exe'
assert len(second_sig['allCertificates']) == 2
assert second_sig['allCertificates'][0]['sha256'] == "BD3FCED7A02EA9A18CEBC0628AF487A2925960BE8A88A35609666FA7901987AA"
assert second_sig['allCertificates'][1]['sha256'] == "56DA8722AFD94066FFE1E4595473A4854892B843A0827D53FB7D8F4AEED1E18B"
second_sig_signer = second_sig['signer']
assert second_sig_signer['digest'] == "61A1F261448BCD1CC8AB9F03DF0209951734455840B2B0C2CFB11FC1DB0C1A81"
assert second_sig_signer['digestAlgorithm'] == 'sha256'
assert len(second_sig_signer['chain']) == 2
second_sig_countersig = second_sig_signer['counterSigners'][0]
assert len(second_sig_countersig['warnings']) == 1
assert second_sig_countersig['warnings'][0] == "Couldn't parse signature"
class TestEscaping(Test):
settings = TestSettings(
tool='fileinfo',
input='3708882e564ba289416f65cb4cb2b4de',
args='--json --verbose'
)
def test_certificates(self):
assert self.fileinfo.succeeded
self.assertEqual(
len(self.fileinfo.output["digitalSignatures"]["signatures"][0]['allCertificates']), 4)
self.assertEqual(self.fileinfo.output["digitalSignatures"]['signatures'][0]['signer']['chain'][0]
["sha256"], "9D5DC543A16E3B97AA12ABB6A09C9393C1F6778E475D95C81607335D5D19AF8B")
self.assertEqual(self.fileinfo.output["digitalSignatures"]['signatures'][0]['signer']['chain'][1]
["sha256"], "0D34394100E961CE4318DBA9B8DD38EBC25BB07AEF78FDA3FFF632685549BA0F")
self.assertEqual(self.fileinfo.output["digitalSignatures"]['signatures'][0]['signer']['counterSigners'][0]['chain'][0]
["sha256"], "0374881C9B74D31F28DC580B0F2B9D2B14A97CE31CBEC2A05AEB377DCDDCC2B0")
self.assertEqual(self.fileinfo.output["digitalSignatures"]['signatures'][0]['signer']['counterSigners'][0]['chain'][1]
["sha256"], "0625FEE1A80D7B897A9712249C2F55FF391D6661DBD8B87F9BE6F252D88CED95")
self.assertEqual(self.fileinfo.output["digitalSignatures"]['signatures'][0]['signer']['chain'][0]
["attributes"]["subject"]["locality"], R"M\xfcnchen")
class Test11(Test):
settings = TestSettings(
tool='fileinfo',
args='--json --verbose',
input='x86-pe-ff6717faf307cdc5ba2d07e320cb8e33'
)
def test_certificates(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_sig = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert len(first_sig['warnings']) == 0
assert first_sig['digestAlgorithm'] == 'sha1'
assert first_sig['fileDigest'] == 'F48199821F5D51C334E00532FABB05E3F2D3D92C'
assert first_sig['signedDigest'] == 'F48199821F5D51C334E00532FABB05E3F2D3D92C'
assert len(first_sig['allCertificates']) == 3
assert first_sig['allCertificates'][0]['sha1'] == "C5DAAAEAA82AAF90C2963CE7432E934A8DE17D51"
assert first_sig['allCertificates'][1]['sha1'] == "7C4656C3061F7F4C0D67B319A855F60EBC11FC44"
assert first_sig['allCertificates'][2]['sha1'] == "2796BAE63F1801E277261BA0D77770028F20EEE4"
first_sig_signer = first_sig['signer']
assert first_sig_signer['digest'] == "9C6BCEE73B8C669764AEDB8046C064C71C5B6A27"
assert first_sig_signer['digestAlgorithm'] == 'sha1'
assert len(first_sig_signer['chain']) == 3
class Test12(Test):
settings = TestSettings(
tool='fileinfo',
input='002720d5ed0df9fe550d52145a44268d24b6368c61065be070e3319b9a67b082',
args='-j -v'
)
def test(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 2
assert len(self.fileinfo.output['digitalSignatures']['signatures']) == 2
first_signature = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_signature['signatureVerified'] == True
assert len(first_signature['warnings']) == 0
assert len(first_signature['allCertificates']) == 6
assert first_signature['fileDigest'] == 'D643405056A4A16042D47942A8C6A59524BDA64A'
assert first_signature['fileDigest'] == first_signature['signedDigest']
assert first_signature['digestAlgorithm'] == 'sha1'
signer = first_signature['signer']
assert len(signer['warnings']) == 0
assert signer['digest'] == '2C39C585984D98957CA03802F8C255EE4359D8EE'
assert signer['digestAlgorithm'] == 'sha1'
assert len(signer['chain']) == 4
assert len(signer['counterSigners']) == 1
counter_signer = signer['counterSigners'][0]
assert len(counter_signer['warnings']) == 0
assert len(counter_signer['chain']) == 2
assert counter_signer['signTime'] == 'Aug 21 14:53:13 2017 GMT'
assert counter_signer['digest'] == '1530CD732860961182222E7C955AEF70BD0BA570'
assert counter_signer['digestAlgorithm'] == 'sha1'
#######################################################################
second_signature = self.fileinfo.output['digitalSignatures']['signatures'][1]
assert second_signature['signatureVerified'] == True
assert len(second_signature['warnings']) == 0
assert len(first_signature['allCertificates']) == 6
assert second_signature['fileDigest'] == '75CACDF5BE7BAEECB89C70BC01343FB7C9E8FD000CC191F08D2A996359D617FE'
assert second_signature['fileDigest'] == second_signature['signedDigest']
assert second_signature['digestAlgorithm'] == 'sha256'
signer = second_signature['signer']
assert len(signer['warnings']) == 0
assert signer['digest'] == '018A36C7429C0058101D3F087E69E27824CC68FEC8A745B8AF59D5D225BBDB77'
assert signer['digestAlgorithm'] == 'sha256'
assert len(signer['chain']) == 4
assert len(signer['counterSigners']) == 1
counter_signer = signer['counterSigners'][0]
assert len(counter_signer['warnings']) == 0
assert len(counter_signer['chain']) == 2
assert counter_signer['signTime'] == 'Aug 21 14:53:39 2017 GMT'
assert counter_signer['digest'] == '32344850DE23CE4A6312A69CC355AC6D16968964'
assert counter_signer['digestAlgorithm'] == 'sha1'
class TestProgramName(Test):
settings = TestSettings(
tool='fileinfo',
input='0059fb3f225c5784789622eeccb97197d591972851b63d59f5bd107ddfdb7a21',
args='-j -v'
)
def test(self):
assert self.fileinfo.succeeded
assert self.fileinfo.output['digitalSignatures']['numberOfSignatures'] == 1
first_signature = self.fileinfo.output['digitalSignatures']['signatures'][0]
assert first_signature['programName'] == "GoTo Opener"
| 2.21875 | 2 |
app/darn.py | AmitSrourDev/darn | 0 | 6629 | import subprocess
def run(cmd):
subprocess.run(cmd.split(' '))
def ls():
subprocess.call(["ls", "-l"]) | 2.109375 | 2 |
virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/become/runas.py | lakhlaifi/RedHat-Ansible | 1 | 6630 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: runas
short_description: Run As user
description:
- This become plugins allows your remote/login user to execute commands as another user via the windows runas facility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: runas_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_runas_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_RUNAS_USER
required: True
become_flags:
description: Options to pass to runas, a space delimited list of k=v pairs
default: ''
ini:
- section: privilege_escalation
key: become_flags
- section: runas_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_runas_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_RUNAS_FLAGS
become_pass:
description: password
ini:
- section: runas_become_plugin
key: password
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_runas_runas
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_RUNAS_PASS
notes:
- runas is really implemented in the powershell module handler and as such can only be used with winrm connections.
- This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'runas'
def build_become_command(self, cmd, shell):
# runas is implemented inside the winrm connection plugin
return cmd
| 1.703125 | 2 |
2017/lab_dh/utils.py | JustHitTheCore/ctf_workshops | 7 | 6631 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
~Gros
'''
from hashlib import sha256
import random
def add_padding(data, block_size=16):
"""add PKCS#7 padding"""
size = block_size - (len(data)%block_size)
return data+chr(size)*size
def strip_padding(data, block_size=16):
"""strip PKCS#7 padding"""
padding = ord(data[-1])
if padding == 0 or padding > block_size or data[-padding:] != chr(padding)*padding:
raise Exception("Invalid padding")
return data[:-padding]
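# Illustrative round-trip through the helpers above (a sketch, not part of the
# original module):
def _demo_padding():
    padded = add_padding('attack at dawn')   # 14 bytes -> padded to 16
    assert len(padded) % 16 == 0
    assert strip_padding(padded) == 'attack at dawn'
    return padded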
def random_bytes(amount=1):
return ''.join([chr(random.randint(0,255)) for x in range(amount)])
def derive_key(key_int, block_size=16):
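    # hash the integer key material and keep the first 16 bytes; note that
    # block_size is accepted but the truncation below is effectively fixed at 16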
return sha256(str(key_int)).digest()[:16] | 3.21875 | 3 |
applications/cli/commands/model/tests/test_export.py | nparkstar/nauta | 390 | 6632 | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from click.testing import CliRunner
from cli_text_consts import ModelExportCmdTexts as Texts
from commands.model.common import workflow_description
from commands.model.export import export
from platform_resources.workflow import ArgoWorkflow, QUEUED_PHASE
FEM_NAME = "EXPORT_1"
SEM_NAME = "EXPORT_2"
FEM_PARAMETERS = "PARAMS_1"
SEM_PARAMETERS = "PARAMS_2"
FEM_START_DATE = '2000-01-01'
FEM_NAMESPACE = 'test-namespace'
TEST_AGROWORKFLOW = ArgoWorkflow(name=FEM_NAME, started_at=FEM_START_DATE, finished_at=None,
namespace=FEM_NAMESPACE, phase=None)
TWO_MODEL_OUTPUT = [workflow_description(name=FEM_NAME, parameters=FEM_PARAMETERS),
workflow_description(name=SEM_NAME, parameters=SEM_PARAMETERS)]
def setup_mocks(mocker):
mocker.patch('commands.model.export.get_kubectl_current_context_namespace',
return_value='fake-namespace')
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock())
mocker.patch('platform_resources.workflow.ArgoWorkflow.get',
return_value=TEST_AGROWORKFLOW)
mocker.patch('os.listdir', return_value=['openvino.yaml', 'tensorflow.yaml', 'some_other_file'])
mocker.patch('commands.model.export.NAUTAConfigMap', return_value=mocker.MagicMock(registry='fake-addr'))
mocker.patch('commands.model.export.Config')
mocker.patch('os.path.isdir', return_value=True)
def test_export(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 0
assert "Successfully created export workflow" in result.output
assert QUEUED_PHASE in result.output
assert FEM_NAME in result.output
assert FEM_START_DATE in result.output
assert FEM_NAMESPACE in result.output
def test_export_inexistent_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "bad"])
assert result.exit_code == 2
assert "Format: bad does not exist. Choose from:" in result.output
def test_export_failure(mocker):
setup_mocks(mocker)
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock(create=lambda: RuntimeError))
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 1
assert "Failed to create export workflow" in result.output
def test_export_list(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", return_value=TWO_MODEL_OUTPUT)
result = CliRunner().invoke(export, ["formats"])
assert FEM_NAME in result.output
assert SEM_NAME in result.output
assert FEM_PARAMETERS in result.output
assert SEM_PARAMETERS in result.output
def test_export_list_error(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", side_effect=RuntimeError)
result = CliRunner().invoke(export, ["formats"])
assert Texts.EXPORT_LIST_ERROR_MSG in result.output
def test_export_missing_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["wrong-option"])
assert Texts.MISSING_EXPORT_FORMAT.format(formats=["openvino", "tensorflow"]) in result.output
| 1.84375 | 2 |
var/spack/repos/builtin/packages/py-mdanalysis/package.py | LiamBindle/spack | 2,360 | 6633 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMdanalysis(PythonPackage):
"""MDAnalysis is a Python toolkit to analyze molecular dynamics
trajectories generated by a wide range of popular simulation
packages including DL_Poly, CHARMM, Amber, NAMD, LAMMPS, and
Gromacs. (See the lists of supported trajectory formats and
topology formats.)"""
homepage = "https://www.mdanalysis.org"
pypi = "MDAnalysis/MDAnalysis-0.19.2.tar.gz"
version('1.0.0', sha256='f45a024aca45e390ff1c45ca90beb2180b78881be377e2a1aa9cd6c109bcfa81')
version('0.20.1', sha256='d04b71b193b9716d2597ffb9938b93f43487fa535da1bb5c1f2baccf356d7df9')
version('0.19.2', sha256='c5395bbafa5efca2e1aee4715d26129844140c47cb8301da0293106cb969de7d')
version('0.19.1', sha256='ff1d694f8598c0833ec340de6a6adb3b5e62b92d0fa94ee6401718ba972db3cc')
version('0.19.0', sha256='248e3b37fc6150e31c609cc18a3927c32aee37b76d29cbfedf635e7e1aa982cf')
version('0.18.0', sha256='a08acea1755112411e7db55e3f282e164b47a59e15794b38744cce6c596f252a')
version('0.17.0', sha256='9bd61760334698cc7b8a57ad26456451e926e9c9e66722594ad8816561348cde')
version('0.16.2', sha256='407d9a9ff1ab8a5e47973714d06fabff220f8d08a28792dee93e88e70e995b0a')
version('0.16.1', sha256='3dc8f5d639ab3a0d152cbd7259ae9372ec8a9bac0f8cb7d3b80ce5adc1e3ee57')
version('0.16.0', sha256='c4824fa1fddd336daa39371436187ebb023366885fb250c2827ed7fce2546bd4')
version('0.15.0', sha256='9088786048b47339cba1f8a586977bbb3bb04ae1bcd0462b59e45bda37e25533')
variant('analysis', default=True,
description='Enable analysis packages: matplotlib, scipy, seaborn')
variant('amber', default=False,
description='Support AMBER netcdf format.')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:', type=('build', 'run'))
depends_on('py-mock', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('py-joblib', when='@0.16.0:0.20.1', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.15.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.19.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.20.1:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.16.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('py-matplotlib', when='@:0.15.0+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.16.1+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.2:', type=('build', 'run'))
depends_on('py-scipy', when='@:0.16.1+analysis', type=('build', 'run'))
depends_on('py-scipy', when='@0.16.2:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('py-scikit-learn', when='@0.16.0:+analysis', type=('build', 'run'))
depends_on('py-seaborn', when='+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='+amber', type=('build', 'run'))
depends_on('hdf5', when='+amber', type=('run'))
| 1.359375 | 1 |
lesley-byte/graphpressure.py | lesley-byte/enviroplus-python | 0 | 6634 | from requests import get
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime as dt
from bme280 import BME280
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys = []
bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)
def animate(i, xs, ys):
pressure = bme280.get_pressure()
xs.append(dt.datetime.now().strftime('%H:%M:%S'))
ys.append(pressure)
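    # keep only the most recent 20 samples so the x-axis scrolls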
xs = xs[-20:]
ys = ys[-20:]
ax.clear()
ax.plot(xs, ys)
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('Pressure over time')
plt.ylabel("pressure")
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=60000)
plt.show()
| 2.875 | 3 |
bootstrapvz/plugins/ova/tasks.py | brett-smith/bootstrap-vz | 0 | 6635 | <reponame>brett-smith/bootstrap-vz<gh_stars>0
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import workspace
import os
import shutil
assets = os.path.normpath(os.path.join(os.path.dirname(__file__), 'assets'))
class CheckOVAPath(Task):
description = 'Checking if the OVA file already exists'
phase = phases.preparation
@classmethod
def run(cls, info):
ova_basename = info.manifest.name.format(**info.manifest_vars)
ova_name = ova_basename + '.ova'
ova_path = os.path.join(info.manifest.bootstrapper['workspace'], ova_name)
if os.path.exists(ova_path):
from bootstrapvz.common.exceptions import TaskError
msg = 'The OVA `{name}\' already exists at `{path}\''.format(name=ova_name, path=ova_path)
raise TaskError(msg)
info._ova['ova_basename'] = ova_basename
info._ova['ova_name'] = ova_name
info._ova['ova_path'] = ova_path
class CreateOVADir(Task):
description = 'Creating directory for the OVA'
phase = phases.preparation
predecessors = [workspace.CreateWorkspace, CheckOVAPath]
@classmethod
def run(cls, info):
info._ova['folder'] = os.path.join(info.workspace, 'ova')
os.mkdir(info._ova['folder'])
class PackageOVA(Task):
description = 'Packaging the volume as an OVA'
phase = phases.image_registration
@classmethod
def run(cls, info):
import random
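        # 08:00:27 is the VirtualBox OUI; only the lower three octets are randomized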
mac_address = '080027{mac:06X}'.format(mac=random.randrange(16 ** 6))
from bootstrapvz.common.tools import log_check_call
disk_name = info._ova['ova_basename'] + '.' + info.volume.extension
disk_link = os.path.join(info._ova['folder'], disk_name)
log_check_call(['ln', '-s', info.volume.image_path, disk_link])
ovf_path = os.path.join(info._ova['folder'], info._ova['ova_basename'] + '.ovf')
cls.write_ovf(info, ovf_path, mac_address, disk_name)
ova_files = os.listdir(info._ova['folder'])
        log_check_call(['ovftool', ovf_path, info._ova['ova_path']])
import logging
logging.getLogger(__name__).info('The OVA has been placed at ' + info._ova['ova_path'])
@classmethod
def write_ovf(cls, info, destination, mac_address, disk_name):
namespaces = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
'rasd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData',
'vssd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'vbox': 'http://www.virtualbox.org/ovf/machine',
}
def attr(element, name, value=None):
for prefix, ns in namespaces.iteritems():
name = name.replace(prefix + ':', '{' + ns + '}')
if value is None:
return element.attrib[name]
else:
element.attrib[name] = str(value)
template_path = os.path.join(assets, 'default.ovf')
if 'ovf' in info.manifest.plugins['ova']:
template_path = info.manifest.plugins['ova']['ovf']
import xml.etree.ElementTree as ET
template = ET.parse(template_path)
root = template.getroot()
[disk_ref] = root.findall('./ovf:References/ovf:File', namespaces)
attr(disk_ref, 'ovf:href', disk_name)
# List of OVF disk format URIs
# Snatched from VBox source (src/VBox/Main/src-server/ApplianceImpl.cpp:47)
# ISOURI = "http://www.ecma-international.org/publications/standards/Ecma-119.htm"
# VMDKStreamURI = "http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized"
# VMDKSparseURI = "http://www.vmware.com/specifications/vmdk.html#sparse"
# VMDKCompressedURI = "http://www.vmware.com/specifications/vmdk.html#compressed"
# VMDKCompressedURI2 = "http://www.vmware.com/interfaces/specifications/vmdk.html#compressed"
# VHDURI = "http://go.microsoft.com/fwlink/?LinkId=137171"
volume_uuid = info.volume.get_uuid()
[disk] = root.findall('./ovf:DiskSection/ovf:Disk', namespaces)
attr(disk, 'ovf:capacity', info.volume.size.bytes.get_qty_in('B'))
attr(disk, 'ovf:format', info.volume.ovf_uri)
attr(disk, 'vbox:uuid', volume_uuid)
[system] = root.findall('./ovf:VirtualSystem', namespaces)
attr(system, 'ovf:id', info._ova['ova_basename'])
# Set the operating system
[os_section] = system.findall('./ovf:OperatingSystemSection', namespaces)
os_info = {'i386': {'id': 96, 'name': 'Debian'},
'amd64': {'id': 96, 'name': 'Debian_64'}
}.get(info.manifest.system['architecture'])
attr(os_section, 'ovf:id', os_info['id'])
[os_desc] = os_section.findall('./ovf:Description', namespaces)
os_desc.text = os_info['name']
[os_type] = os_section.findall('./vbox:OSType', namespaces)
os_type.text = os_info['name']
[sysid] = system.findall('./ovf:VirtualHardwareSection/ovf:System/'
'vssd:VirtualSystemIdentifier', namespaces)
sysid.text = info._ova['ova_basename']
[machine] = system.findall('./vbox:Machine', namespaces)
import uuid
del machine.attrib['uuid']
attr(machine, 'uuid', uuid.uuid4())
del machine.attrib['name']
attr(machine, 'name', info._ova['ova_basename'])
from datetime import datetime
del machine.attrib['lastStateChange']
attr(machine, 'lastStateChange', datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'))
[nic] = machine.findall('./ovf:Hardware/ovf:Network/ovf:Adapter', namespaces)
        attr(nic, 'MACAddress', mac_address)  # the MAC belongs on the Adapter element, not the Machine
[device_img] = machine.findall('./ovf:StorageControllers'
'/ovf:StorageController[1]'
'/ovf:AttachedDevice/ovf:Image', namespaces)
attr(device_img, 'uuid', '{' + str(volume_uuid) + '}')
template.write(destination, xml_declaration=True) # , default_namespace=namespaces['ovf']
class RemoveOVADir(Task):
description = 'Removing the OVA directory'
phase = phases.cleaning
successors = [workspace.DeleteWorkspace]
@classmethod
def run(cls, info):
shutil.rmtree(info._ova['folder'])
del info._ova['folder']
| 2.34375 | 2 |
docs/conf.py | PhilippJunk/homelette | 0 | 6636 | <gh_stars>0
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'homelette'
copyright = '2021, <NAME>, <NAME>'
author = '<NAME>, <NAME>'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'nbsphinx',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = 'logo.png'
html_theme_options = {
'logo_only': False,
'style_nav_header_background': '#000000',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for LaTex output ------------------------------------------------
latex_elements = {
'preamble': r'''
\setcounter{tocdepth}{1}
\renewcommand{\hyperref}[2][]{#2}
'''
}
# -- Extension configuration: autodoc ----------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
}
autoclass_content = 'class'
autodoc_mock_imports = ['altmod', 'modeller', 'ost', 'promod3', 'qmean',
'pandas']
# -- Extension configuration: napoleon ---------------------------------------
napoleon_use_ivar = True
# -- Copy notebooks to include in the documentation --------------------------
notebooks = [
'../examples/Tutorial1_Basics.ipynb',
'../examples/Tutorial2_Modelling.ipynb',
'../examples/Tutorial3_Evaluation.ipynb',
'../examples/Tutorial4_ExtendingHomelette.ipynb',
'../examples/Tutorial5_Parallelization.ipynb',
'../examples/Tutorial6_ComplexModelling.ipynb',
'../examples/Tutorial7_AssemblingPipelines.ipynb',
'../examples/Tutorial8_AlignmentGeneration.ipynb',
]
for notebook in notebooks:
if os.path.exists(notebook):
shutil.copy(notebook, '.')
# -- Copy logo ---------------------------------------------------------------
if os.path.exists('../logo/logo.png'):
shutil.copy('../logo/logo.png', '.')
| 1.703125 | 2 |
bytecode2ast/parsers/bases.py | Cologler/bytecode2ast-python | 0 | 6637 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <<EMAIL>>
# ----------
# some object for parser
# ----------
from typing import List
import enum
import dis
from collections import defaultdict
class ID:
def __init__(self, name):
self._name = name # a name use to debug
def __repr__(self):
return f'ID({self._name})'
def __str__(self):
return repr(self)
class Scope(enum.IntEnum):
NONE = enum.auto()
LOOP = enum.auto()
WITH = enum.auto()
EXCEPT = enum.auto()
FINALLY = enum.auto()
class CodeState:
def __init__(self, *, scope=Scope.NONE):
self._ast_stack = []
self._load_stack = []
self._scope = scope
self._state: dict = None if scope is Scope.NONE else {}
self._blocks = [[]] # ensure has last block
self._instrs = [] # all handled instrs in this state
def __repr__(self):
return f'b({self._blocks!r}), l({self._load_stack!r})'
@property
def scope(self):
return self._scope
# state
@property
def state(self):
return self._state
def add_state(self, id, value):
''' add a state, also ensure it does not exists. '''
assert id not in self._state
self._state[id] = value
# instrs
def add_instr(self, instr: dis.Instruction):
''' add a handled instruction in this state '''
self._instrs.append(instr)
def get_instrs(self, key=None) -> List[dis.Instruction]:
''' get all instructions by key from this state '''
if key is None:
return self._instrs.copy()
else:
return [i for i in self._instrs if i.opcode == key or i.opname == key]
def copy(self):
''' copy a `CodeState` '''
state = CodeState()
state._load_stack = self._load_stack.copy()
state._ast_stack = self._ast_stack.copy()
return state
def copy_with_load(self, load_count):
''' copy a `CodeState` with empty ast stack. '''
state = CodeState()
state._load_stack = self._load_stack[-load_count:]
return state
def push(self, node):
''' push a node into load stack. '''
self._load_stack.append(node)
def pop(self):
''' pop the top node from load stack. '''
return self._load_stack.pop()
def pop_seq(self, count: int) -> list:
''' pop a list of top nodes from load stack. '''
assert count >= 0
if count > 0:
items = self._load_stack[-count:]
self._load_stack = self._load_stack[0:-count]
return items
else:
return []
def dup_top(self):
''' repeat top once. '''
self._load_stack.append(self._load_stack[-1])
def store(self, node):
''' store a node '''
self.add_node(node)
def add_node(self, node):
''' add a final node into ast stmt tree '''
self._blocks[-1].append(node)
def get_value(self) -> list:
''' get stmts from single block. '''
# ensure all status was handled
assert not self._state, self._state
assert not self._load_stack, self._load_stack
# get value
assert len(self._blocks) == 1, self._blocks
return self._blocks[-1]
def new_block(self):
''' make a new stmts block '''
self._blocks.append([])
def get_blocks(self) -> list:
''' get all stmts blocks. '''
# ensure all status was handled
assert not self._state, self._state
assert not self._load_stack, self._load_stack
# get value
return self._blocks
def get_block_count(self) -> int:
''' get count of stmts blocks. '''
return len(self._blocks)
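# Illustrative sketch (not part of the original module) of how the load stack
# and statement blocks of a CodeState interact; plain strings stand in for the
# ast nodes real callers would push.
def _demo_code_state():
    state = CodeState()
    state.push('lhs')
    state.push('rhs')
    left, right = state.pop_seq(2)            # drains the load stack
    state.store('%s = %s' % (left, right))    # records a finished statement
    return state.get_value()                  # -> ['lhs = rhs']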
class CodeReaderIter:
__slots__ = ('_reader', '_condition')
def __init__(self, reader, condition):
self._reader: CodeReader = reader
self._condition = condition
def __iter__(self):
while self._condition():
yield self._reader.pop()
def fill_state(self, state: CodeState):
''' iter self into the `CodeState` and return it. '''
for instr in self:
handler = get_instr_handler(instr)
handler(self._reader, state, instr)
state.add_instr(instr)
return state
def get_state(self, *, scope=Scope.NONE):
''' iter self into a new `CodeState`, return the `CodeState` '''
state = CodeState(scope=scope)
return self.fill_state(state)
def get_value(self, *, scope=Scope.NONE):
''' iter self into a new `CodeState`, return value from `CodeState`. '''
return self.get_state(scope=scope).get_value()
def get_blocks(self, *, scope=Scope.NONE):
''' iter self into a new `CodeState`, return blocks from `CodeState`. '''
return self.get_state(scope=scope).get_blocks()
class CodeReader:
def __init__(self, instructions):
# reversed will fast
self._instructions = list(reversed(instructions))
self._lineno = None
def __bool__(self):
return bool(self._instructions)
def __repr__(self):
return repr(list(reversed(self._instructions)))
@property
def co_consts(self):
return self._co_consts
def get_instrs_count(self) -> int:
return len(self._instructions)
def get_lineno(self) -> int:
return self._lineno
def peek(self) -> dis.Instruction:
''' peek one instr '''
if not self._instructions:
return None
return self._instructions[-1]
def pop(self) -> dis.Instruction:
''' pop one instr '''
instr = self._instructions.pop()
if instr.starts_line is not None:
self._lineno = instr.starts_line
return instr
def pop_assert(self, opcode: int) -> dis.Instruction:
instr = self.pop()
assert instr.opcode == opcode
return instr
def pop_if(self, opcode: int) -> dis.Instruction:
if self._instructions and self._instructions[-1].opcode == opcode:
return self.pop()
# read methods
def read_until_end(self):
''' read until reader end. '''
return CodeReaderIter(self, lambda: self)
def read_until_offset(self, offset: int):
''' read until come to the offset '''
return CodeReaderIter(self, lambda: self.peek().offset != offset)
def read_until_opcodes(self, *opcodes):
''' read until visit some opcodes '''
return CodeReaderIter(self, lambda: self.peek().opcode not in opcodes)
def read_until_count(self, count: int):
''' read until handled count of instrs '''
end_count = self.get_instrs_count() - count
return CodeReaderIter(self, lambda: self.get_instrs_count() > end_count)
def read_until_scoped_count(self, count: int):
''' read until handled count of instrs in current scope. '''
if count <= 0:
raise ValueError(count)
def cond():
nonlocal count
count -= 1
return count >= 0
return CodeReaderIter(self, cond)
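# Illustrative sketch (not part of the original module): walking raw bytecode
# with CodeReader alone, without dispatching any opcode handlers.
def _demo_reader():
    code = compile('x = 1', '<demo>', 'exec')
    reader = CodeReader(list(dis.get_instructions(code)))
    names = []
    while reader:
        names.append(reader.pop().opname)
    return names                              # e.g. ['LOAD_CONST', 'STORE_NAME', ...]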
_OPCODE_MAP = {}
def op(opname, opcode, **kwargs):
def wrapper(func):
def func_wrapper(reader, state, instr: dis.Instruction):
func(reader, state, instr, **kwargs)
        assert (opname, opcode) not in _OPCODE_MAP
_OPCODE_MAP[(opname, opcode)] = func_wrapper
return func
return wrapper
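# Illustrative registration, kept as a comment so it cannot collide with the
# real handlers registered by the parser modules; the ('LOAD_CONST', 100) pair
# is an assumption about the running interpreter's opcode numbering:
#
#   @op('LOAD_CONST', 100)
#   def on_load_const(reader, state, instr):
#       state.push(instr.argval)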
def get_instr_handler(instr):
'''
the return function `(reader, state, instr) -> None`
'''
k = (instr.opname, instr.opcode)
try:
return _OPCODE_MAP[k]
except KeyError:
raise NotImplementedError(k, instr)
| 2.640625 | 3 |
netbox/extras/forms.py | orphanedgamboa/netbox | 1 | 6638 | <filename>netbox/extras/forms.py
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, APISelectMultiple, BootstrapMixin, BulkEditForm, BulkEditNullBooleanSelect, ColorSelect,
CommentField, ContentTypeMultipleChoiceField, CSVModelForm, DateTimePicker, DynamicModelMultipleChoiceField,
JSONField, SlugField, StaticSelect2, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.models import Cluster, ClusterGroup
from .choices import *
from .models import ConfigContext, CustomField, ImageAttachment, JournalEntry, ObjectChange, Tag
from .utils import FeatureQuery
#
# Custom fields
#
class CustomFieldForm(forms.Form):
"""
Extend Form to include custom field support.
"""
model = None
def __init__(self, *args, **kwargs):
if self.model is None:
raise NotImplementedError("CustomFieldForm must specify a model class.")
self.custom_fields = []
super().__init__(*args, **kwargs)
# Append relevant custom fields to the form instance
obj_type = ContentType.objects.get_for_model(self.model)
for cf in CustomField.objects.filter(content_types=obj_type):
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field()
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
class CustomFieldModelForm(forms.ModelForm):
"""
Extend ModelForm to include custom field support.
"""
def __init__(self, *args, **kwargs):
self.obj_type = ContentType.objects.get_for_model(self._meta.model)
self.custom_fields = []
super().__init__(*args, **kwargs)
self._append_customfield_fields()
def _append_customfield_fields(self):
"""
Append form fields for all CustomFields assigned to this model.
"""
# Append form fields; assign initial values if modifying and existing object
for cf in CustomField.objects.filter(content_types=self.obj_type):
field_name = 'cf_{}'.format(cf.name)
if self.instance.pk:
self.fields[field_name] = cf.to_form_field(set_initial=False)
self.fields[field_name].initial = self.instance.custom_field_data.get(cf.name)
else:
self.fields[field_name] = cf.to_form_field()
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
def clean(self):
# Save custom field data on instance
for cf_name in self.custom_fields:
self.instance.custom_field_data[cf_name[3:]] = self.cleaned_data.get(cf_name)
return super().clean()
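# Illustrative subclass (an assumption for documentation purposes; the real
# model forms live in the individual NetBox apps):
#
#   class SiteForm(BootstrapMixin, CustomFieldModelForm):
#       class Meta:
#           model = Site
#           fields = '__all__'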
class CustomFieldModelCSVForm(CSVModelForm, CustomFieldModelForm):
def _append_customfield_fields(self):
# Append form fields
for cf in CustomField.objects.filter(content_types=self.obj_type):
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field(for_csv_import=True)
# Annotate the field in the list of CustomField form fields
self.custom_fields.append(field_name)
class CustomFieldBulkEditForm(BulkEditForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.custom_fields = []
self.obj_type = ContentType.objects.get_for_model(self.model)
# Add all applicable CustomFields to the form
custom_fields = CustomField.objects.filter(content_types=self.obj_type)
for cf in custom_fields:
# Annotate non-required custom fields as nullable
if not cf.required:
self.nullable_fields.append(cf.name)
self.fields[cf.name] = cf.to_form_field(set_initial=False, enforce_required=False)
# Annotate this as a custom field
self.custom_fields.append(cf.name)
class CustomFieldFilterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.obj_type = ContentType.objects.get_for_model(self.model)
super().__init__(*args, **kwargs)
# Add all applicable CustomFields to the form
custom_fields = CustomField.objects.filter(content_types=self.obj_type).exclude(
filter_logic=CustomFieldFilterLogicChoices.FILTER_DISABLED
)
for cf in custom_fields:
field_name = 'cf_{}'.format(cf.name)
self.fields[field_name] = cf.to_form_field(set_initial=True, enforce_required=False)
#
# Tags
#
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = [
'name', 'slug', 'color', 'description'
]
fieldsets = (
('Tag', ('name', 'slug', 'color', 'description')),
)
class TagCSVForm(CSVModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = Tag.csv_headers
help_texts = {
'color': mark_safe('RGB color in hexadecimal (e.g. <code>00ff00</code>)'),
}
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Add add/remove tags fields
self.fields['add_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
self.fields['remove_tags'] = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
class TagFilterForm(BootstrapMixin, forms.Form):
model = Tag
q = forms.CharField(
required=False,
label=_('Search')
)
content_type_id = ContentTypeMultipleChoiceField(
queryset=ContentType.objects.filter(FeatureQuery('tags').get_query()),
required=False,
label=_('Tagged object type')
)
class TagBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
widget=forms.MultipleHiddenInput
)
color = forms.CharField(
max_length=6,
required=False,
widget=ColorSelect()
)
description = forms.CharField(
max_length=200,
required=False
)
class Meta:
nullable_fields = ['description']
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
regions = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False
)
site_groups = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False
)
sites = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False
)
device_types = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False
)
roles = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False
)
platforms = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False
)
cluster_groups = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False
)
clusters = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False
)
tenant_groups = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False
)
tenants = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False
)
tags = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
required=False
)
data = JSONField(
label=''
)
class Meta:
model = ConfigContext
fields = (
'name', 'weight', 'description', 'is_active', 'regions', 'site_groups', 'sites', 'roles', 'device_types',
'platforms', 'cluster_groups', 'clusters', 'tenant_groups', 'tenants', 'tags', 'data',
)
class ConfigContextBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=ConfigContext.objects.all(),
widget=forms.MultipleHiddenInput
)
weight = forms.IntegerField(
required=False,
min_value=0
)
is_active = forms.NullBooleanField(
required=False,
widget=BulkEditNullBooleanSelect()
)
description = forms.CharField(
required=False,
max_length=100
)
class Meta:
nullable_fields = [
'description',
]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
field_order = [
'q', 'region_id', 'site_group_id', 'site_id', 'role_id', 'platform_id', 'cluster_group_id', 'cluster_id',
'tenant_group_id', 'tenant_id',
]
q = forms.CharField(
required=False,
label=_('Search')
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
label=_('Regions')
)
site_group_id = DynamicModelMultipleChoiceField(
queryset=SiteGroup.objects.all(),
required=False,
label=_('Site groups')
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
label=_('Sites')
)
device_type_id = DynamicModelMultipleChoiceField(
queryset=DeviceType.objects.all(),
required=False,
label=_('Device types')
)
role_id = DynamicModelMultipleChoiceField(
queryset=DeviceRole.objects.all(),
required=False,
label=_('Roles')
)
platform_id = DynamicModelMultipleChoiceField(
queryset=Platform.objects.all(),
required=False,
label=_('Platforms')
)
cluster_group_id = DynamicModelMultipleChoiceField(
queryset=ClusterGroup.objects.all(),
required=False,
label=_('Cluster groups')
)
cluster_id = DynamicModelMultipleChoiceField(
queryset=Cluster.objects.all(),
required=False,
label=_('Clusters')
)
tenant_group_id = DynamicModelMultipleChoiceField(
queryset=TenantGroup.objects.all(),
required=False,
label=_('Tenant groups')
)
tenant_id = DynamicModelMultipleChoiceField(
queryset=Tenant.objects.all(),
required=False,
label=_('Tenant')
)
tag = DynamicModelMultipleChoiceField(
queryset=Tag.objects.all(),
to_field_name='slug',
required=False,
label=_('Tags')
)
#
# Filter form for local config context data
#
class LocalConfigContextFilterForm(forms.Form):
local_context_data = forms.NullBooleanField(
required=False,
label=_('Has local config context data'),
widget=StaticSelect2(
choices=BOOLEAN_WITH_BLANK_CHOICES
)
)
#
# Image attachments
#
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = ImageAttachment
fields = [
'name', 'image',
]
#
# Journal entries
#
class JournalEntryForm(BootstrapMixin, forms.ModelForm):
comments = CommentField()
class Meta:
model = JournalEntry
fields = ['assigned_object_type', 'assigned_object_id', 'kind', 'comments']
widgets = {
'assigned_object_type': forms.HiddenInput,
'assigned_object_id': forms.HiddenInput,
}
class JournalEntryBulkEditForm(BootstrapMixin, BulkEditForm):
pk = forms.ModelMultipleChoiceField(
queryset=JournalEntry.objects.all(),
widget=forms.MultipleHiddenInput
)
kind = forms.ChoiceField(
choices=JournalEntryKindChoices,
required=False
)
comments = forms.CharField(
required=False,
widget=forms.Textarea()
)
class Meta:
nullable_fields = []
class JournalEntryFilterForm(BootstrapMixin, forms.Form):
model = JournalEntry
q = forms.CharField(
required=False,
label=_('Search')
)
created_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
created_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
created_by_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
assigned_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
kind = forms.ChoiceField(
choices=add_blank_choice(JournalEntryKindChoices),
required=False,
widget=StaticSelect2()
)
#
# Change logging
#
class ObjectChangeFilterForm(BootstrapMixin, forms.Form):
model = ObjectChange
q = forms.CharField(
required=False,
label=_('Search')
)
time_after = forms.DateTimeField(
required=False,
label=_('After'),
widget=DateTimePicker()
)
time_before = forms.DateTimeField(
required=False,
label=_('Before'),
widget=DateTimePicker()
)
action = forms.ChoiceField(
choices=add_blank_choice(ObjectChangeActionChoices),
required=False,
widget=StaticSelect2()
)
user_id = DynamicModelMultipleChoiceField(
queryset=User.objects.all(),
required=False,
label=_('User'),
widget=APISelectMultiple(
api_url='/api/users/users/',
)
)
changed_object_type_id = DynamicModelMultipleChoiceField(
queryset=ContentType.objects.all(),
required=False,
label=_('Object Type'),
widget=APISelectMultiple(
api_url='/api/extras/content-types/',
)
)
#
# Scripts
#
class ScriptForm(BootstrapMixin, forms.Form):
_commit = forms.BooleanField(
required=False,
initial=True,
label="Commit changes",
help_text="Commit changes to the database (uncheck for a dry-run)"
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Move _commit to the end of the form
commit = self.fields.pop('_commit')
self.fields['_commit'] = commit
@property
def requires_input(self):
"""
A boolean indicating whether the form requires user input (ignore the _commit field).
"""
return bool(len(self.fields) > 1)
| 2.09375 | 2 |
unwarp_models.py | zgjslc/Film-Recovery-master1 | 0 | 6639 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.misc import modules
constrain_path = {
('threeD', 'normal'): (True, True, ''),
('threeD', 'depth'): (True, True, ''),
('normal', 'depth'): (True, True, ''),
('depth', 'normal'): (True, True, ''),
}
class UnwarpNet(nn.Module):
def __init__(self, use_simple=False, combine_num=3, use_constrain=True, constrain_configure=None):
super(UnwarpNet, self).__init__()
self.combine_num = combine_num
self.use_simple = use_simple
self.use_constrain = use_constrain
self.constrain_configure = constrain_configure
self.geo_encoder = modules.Encoder(downsample=6, in_channels=3)
self.threeD_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
self.normal_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
self.depth_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=self.combine_num)
self.mask_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
bottle_neck = sum([2 ** (i + 4) for i in range(self.combine_num)])
self.second_encoder = modules.Encoder(downsample=6, in_channels=bottle_neck * 3 + 3)
self.uv_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
# self.albedo_decoder = modules.AlbedoDecoder(downsample=6, out_channels=1)
self.albedo_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
self.deform_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
self.dep2nor = None
self.threeD_to_nor2dep = None
self.nor2dep = None
def forward(self, x):
gxvals, gx_encode = self.geo_encoder(x)
threeD_map, threeD_feature = self.threeD_decoder(gxvals, gx_encode)
threeD_map = nn.functional.tanh(threeD_map)
dep_map, dep_feature = self.depth_decoder(gxvals, gx_encode)
dep_map = nn.functional.tanh(dep_map)
nor_map, nor_feature = self.normal_decoder(gxvals, gx_encode)
nor_map = nn.functional.tanh(nor_map)
mask_map, mask_feature = self.mask_decoder(gxvals, gx_encode)
mask_map = torch.nn.functional.sigmoid(mask_map)
# geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature], dim=1)
geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature, x], dim=1)
b, c, h, w = geo_feature.size()
geo_feature_mask = geo_feature.mul(mask_map.expand(b, c, h, w))
secvals, sec_encode = self.second_encoder(geo_feature_mask)
uv_map, _ = self.uv_decoder(secvals, sec_encode)
uv_map = nn.functional.tanh(uv_map)
alb_map, _ = self.albedo_decoder(secvals, sec_encode)
alb_map = nn.functional.tanh(alb_map)
deform_map, _ = self.deform_decoder(secvals, sec_encode)
deform_map = nn.functional.tanh(deform_map)
return uv_map, threeD_map, nor_map, alb_map, dep_map, mask_map, \
None, None, None, None, None, deform_map
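# Minimal smoke test (illustrative sketch, not part of the original file). The
# 256x256 input size is an assumption; any resolution compatible with the
# 6-level encoder/decoder should work.
if __name__ == "__main__":
    net = UnwarpNet()
    outputs = net(torch.randn(1, 3, 256, 256))
    for i, out in enumerate(outputs):
        if out is not None:
            print(i, tuple(out.shape))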
| 2.046875 | 2 |
endpoint/test_endpoint/update.py | pansila/Auto-Test-System | 14 | 6640 | <filename>endpoint/test_endpoint/update.py
import configparser
import os
import hashlib
import json
import shutil
import sys
import tempfile
import subprocess
import tarfile
import re
import stat
from functools import cmp_to_key
from contextlib import closing
from gzip import GzipFile
from pathlib import Path
from urllib.error import HTTPError
from urllib.request import Request
from urllib.request import urlopen
WINDOWS = sys.platform == "win32"
BOOTSTRAP = """\
import os, sys
import re
import subprocess
def _which_python():
allowed_executables = ["python3", "python"]
if sys.platform == 'win32':
# in favor of 32 bit python to be compatible with the 32bit dlls of test libraries
allowed_executables[:0] = ["py.exe -3-32", "py.exe -2-32", "py.exe -3-64", "py.exe -2-64"]
# \d in regex ensures we can convert to int later
version_matcher = re.compile(r"^Python (?P<major>\d+)\.(?P<minor>\d+)\..+$")
fallback = None
for executable in allowed_executables:
try:
raw_version = subprocess.check_output(
executable + " --version", stderr=subprocess.STDOUT, shell=True
).decode("utf-8")
except subprocess.CalledProcessError:
continue
match = version_matcher.match(raw_version.strip())
if match and tuple(map(int, match.groups())) >= (3, 0):
# favor the first py3 executable we can find.
return executable
if fallback is None:
# keep this one as the fallback; it was the first valid executable we found.
fallback = executable
if fallback is None:
# Avoid breaking existing scripts
fallback = "python"
return fallback
if __name__ == '__main__':
py_executable = _which_python()
subprocess.run(py_executable + r' {collie_bin} ' + ' '.join(sys.argv[1:]), shell=True)
"""
BIN = """#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import argparse
lib = os.path.normpath(os.path.join(os.path.realpath(__file__), "..", "..", "lib", "collie"))
sys.path.insert(0, lib)
from test_endpoint.app import main
if __name__ == "__main__":
sys.exit(main())
"""
BAT = '@echo off\r\n{python_executable} "{collie_bootstrap}" %*\r\n'
SH = '#!/bin/sh\npython3 "{collie_bootstrap}" $*\n'
def expanduser(path):
"""
Expand ~ and ~user constructions.
Includes a workaround for http://bugs.python.org/issue14768
"""
expanded = os.path.expanduser(path)
if path.startswith("~/") and expanded.startswith("//"):
expanded = expanded[1:]
return expanded
class SelfUpdate:
VERSION_REGEX = re.compile(
r"v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?"
"("
"[._-]?"
r"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\d+)*)?)?"
"([.-]?dev)?"
")?"
r"(?:\+[^\s]+)?"
)
def __init__(self, version=None, force=False):
config = configparser.ConfigParser()
config.read(self.config)
self.server_host = config['tool.collie.settings']['server_host']
self.server_port = config['tool.collie.settings']['server_port']
self.join_id = config['tool.collie.settings']['join_id']
self.uuid = config['tool.collie.settings']['uuid']
server_host = self.server_host.strip('"')
server_port = self.server_port.strip('"')
self.SERVER_URL = f'http://{server_host}:{server_port}/api_v1'
self.METADATA_URL = self.SERVER_URL + "/setting/get-endpoint/json"
self.BASE_URL = self.SERVER_URL + "/setting/download"
self._version = None if isinstance(version, bool) else version
self._force = force
@property
def home(self):
if os.environ.get("COLLIE_HOME"):
return Path(expanduser(os.environ["COLLIE_HOME"]))
home = Path(expanduser("~"))
return home / ".collie"
@property
def bin(self):
return self.home / "bin"
@property
def lib(self):
return self.home / "lib"
@property
def lib_backup(self):
return self.home / "lib-backup"
@property
def config(self):
return self.home / "lib" / 'collie' / 'pyproject.toml'
    def allows_prereleases(self):
        # Assumption: the original file calls this helper from get_version() but
        # never defines it; skipping prereleases by default is a conservative fix.
        return False

    def get_version(self):
from .__version__ import __version__
metadata = json.loads(self._get(self.METADATA_URL).decode())
def _compare_versions(x, y):
mx = self.VERSION_REGEX.match(x)
my = self.VERSION_REGEX.match(y)
vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)
vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)
if vx < vy:
return -1
elif vx > vy:
return 1
return 0
releases = sorted(
metadata["releases"], key=cmp_to_key(_compare_versions)
)
if self._version and self._version not in releases:
print("Version {} does not exist.".format(self._version))
return None, None
version = self._version
if not version:
for release in reversed(releases):
m = self.VERSION_REGEX.match(release)
if m.group(5) and not self.allows_prereleases():
continue
version = release
break
current_version = __version__
if current_version == version and not self._force:
print("Latest version already installed.")
return None, current_version
return version, current_version
def run(self):
version, current_version = self.get_version()
if not version:
return
self.update(version)
self.restore_config()
print(f'Succeeded to update collie to version {version}')
def update(self, version):
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
# Backup the current installation
if self.lib.exists():
shutil.copytree(str(self.lib), str(self.lib_backup))
shutil.rmtree(str(self.lib))
try:
self._update(version)
except Exception:
if not self.lib_backup.exists():
raise
shutil.copytree(str(self.lib_backup), str(self.lib))
shutil.rmtree(str(self.lib_backup))
raise
finally:
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
self.make_bin()
def _update(self, version):
release_name = self._get_release_name(version)
base_url = self.BASE_URL + '?'
name = f"{release_name}.tar.gz"
checksum = f"{release_name}.sha256sum"
try:
r = urlopen(base_url + "file={}".format(checksum))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(checksum))
raise
checksum = r.read().decode().strip()
try:
r = urlopen(base_url + "file={}".format(name))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(name))
raise
meta = r.info()
size = int(meta["Content-Length"])
current = 0
block_size = 8192
sha = hashlib.sha256()
with tempfile.TemporaryDirectory(prefix="collie-updater-") as dir_:
tar = os.path.join(dir_, name)
with open(tar, "wb") as f:
while True:
buffer = r.read(block_size)
if not buffer:
break
current += len(buffer)
f.write(buffer)
sha.update(buffer)
# Checking hashes
if checksum != sha.hexdigest():
raise RuntimeError(
"Hashes for {} do not match: {} != {}".format(
name, checksum, sha.hexdigest()
)
)
gz = GzipFile(tar, mode="rb")
try:
with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:
f.extractall(str(self.lib))
finally:
gz.close()
def restore_config(self):
config = configparser.ConfigParser()
config.read(self.config)
config['tool.collie.settings']['server_host'] = self.server_host
config['tool.collie.settings']['server_port'] = self.server_port
config['tool.collie.settings']['join_id'] = self.join_id
config['tool.collie.settings']['uuid'] = self.uuid
with open(self.config, 'w') as config_file:
config.write(config_file)
def process(self, *args):
return subprocess.check_output(list(args), stderr=subprocess.STDOUT)
def _check_recommended_installation(self):
current = Path(__file__)
try:
current.relative_to(self.home)
except ValueError:
raise RuntimeError(
"Collie was not installed with the recommended installer. "
"Cannot update automatically."
)
def _get_release_name(self, version):
platform = sys.platform
if platform == "linux2":
platform = "linux"
return "collie-{}-{}".format(version, platform)
def _bin_path(self, base_path, bin):
if WINDOWS:
return (base_path / "Scripts" / bin).with_suffix(".exe")
return base_path / "bin" / bin
def make_bin(self):
self.bin.mkdir(0o755, parents=True, exist_ok=True)
python_executable = self._which_python()
with self.bin.joinpath("bootstrap.py").open("w", newline="") as f:
f.write(BOOTSTRAP.format(collie_bin=str(self.bin / "collie.py")))
if WINDOWS:
with self.bin.joinpath("collie.bat").open("w", newline="") as f:
f.write(
BAT.format(
python_executable=python_executable,
collie_bootstrap=str(self.bin / "bootstrap.py").replace(
os.environ["USERPROFILE"], "%USERPROFILE%"
),
)
)
else:
with self.bin.joinpath("collie").open("w", newline="") as f:
f.write(
SH.format(
collie_bootstrap=str(self.bin / "bootstrap.py").replace(
os.getenv("HOME", ""), "$HOME"
),
)
)
bin_content = BIN
if not WINDOWS:
bin_content = "#!/usr/bin/env {}\n".format(python_executable) + bin_content
self.bin.joinpath("collie.py").write_text(bin_content, encoding="utf-8")
if not WINDOWS:
# Making the file executable
st = os.stat(str(self.bin.joinpath("collie")))
os.chmod(str(self.bin.joinpath("collie")), st.st_mode | stat.S_IEXEC)
def _which_python(self):
"""
Decides which python executable we'll embed in the launcher script.
"""
allowed_executables = ["python", "python3"]
if WINDOWS:
allowed_executables += ["py.exe -3", "py.exe -2"]
# \d in regex ensures we can convert to int later
version_matcher = re.compile(r"^Python (?P<major>\d+)\.(?P<minor>\d+)\..+$")
fallback = None
for executable in allowed_executables:
try:
raw_version = subprocess.check_output(
executable + " --version", stderr=subprocess.STDOUT, shell=True
).decode("utf-8")
except subprocess.CalledProcessError:
continue
match = version_matcher.match(raw_version.strip())
if match and tuple(map(int, match.groups())) >= (3, 0):
# favor the first py3 executable we can find.
return executable
if fallback is None:
# keep this one as the fallback; it was the first valid executable we found.
fallback = executable
if fallback is None:
# Avoid breaking existing scripts
fallback = "python"
return fallback
def _get(self, url):
request = Request(url, headers={"User-Agent": "Python Robotest"})
with closing(urlopen(request)) as r:
return r.read()
def update_join_id(self, join_id):
config = configparser.ConfigParser()
config.read(self.config)
config['tool.collie.settings']['join_id'] = f'"{join_id}"'
with open(self.config, 'w') as config_file:
config.write(config_file)
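# Illustrative usage (assumption: a collie installation already exists under
# ~/.collie, since SelfUpdate() reads its pyproject.toml on construction).
if __name__ == "__main__":
    updater = SelfUpdate(version=None, force=True)
    updater.run()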
| 2.125 | 2 |
lib/jbgp/jbgpneighbor.py | routedo/junos-pyez-example | 0 | 6641 | <gh_stars>0
"""
Query BGP neighbor table on a Juniper network device.
"""
import sys
from jnpr.junos import Device
from jnpr.junos.factory import loadyaml
def juniper_bgp_state(dev, bgp_neighbor):
"""
This function queries the BGP neighbor table on a Juniper network device.
dev = Juniper device connection
bgp_neighbor = IP address of BGP neighbor
return = Returns state of BGP neighbor
"""
try:
        # loadyaml() returns the Table/View classes generated from the YAML
        # definition (including bgp_neighbor_info), made callable via globals().
        globals().update(loadyaml('yaml/bgp_neighbor.yml'))
bgp_ni = bgp_neighbor_info(dev).get(neighbor_address=bgp_neighbor)
return bgp_ni
except Exception as err:
print(err)
        dev.close()
        sys.exit(1)
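# Illustrative usage (host, credentials and neighbor address are placeholders;
# assumes yaml/bgp_neighbor.yml is available relative to the working directory).
if __name__ == '__main__':
    dev = Device(host='192.0.2.1', user='admin', passwd='secret')
    dev.open()
    print(juniper_bgp_state(dev, '192.0.2.2'))
    dev.close()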
| 2.390625 | 2 |
lib/cherrypy/cherrypy/test/test_sessionauthenticate.py | MiCHiLU/google_appengine_sdk | 790 | 6642 | <gh_stars>100-1000
import cherrypy
from cherrypy.test import helper
class SessionAuthenticateTest(helper.CPWebCase):
def setup_server():
def check(username, password):
# Dummy check_username_and_password function
if username != 'test' or password != 'password':
return 'Wrong login/password'
def augment_params():
# A simple tool to add some things to request.params
# This is to check to make sure that session_auth can handle request
# params (ticket #780)
cherrypy.request.params["test"] = "test"
cherrypy.tools.augment_params = cherrypy.Tool('before_handler',
augment_params, None, priority=30)
class Test:
_cp_config = {'tools.sessions.on': True,
'tools.session_auth.on': True,
'tools.session_auth.check_username_and_password': check,
'tools.augment_params.on': True,
}
def index(self, **kwargs):
return "Hi %s, you are logged in" % cherrypy.request.login
index.exposed = True
cherrypy.tree.mount(Test())
setup_server = staticmethod(setup_server)
def testSessionAuthenticate(self):
# request a page and check for login form
self.getPage('/')
self.assertInBody('<form method="post" action="do_login">')
# setup credentials
login_body = 'username=test&password=password&from_page=/'
# attempt a login
self.getPage('/do_login', method='POST', body=login_body)
self.assertStatus((302, 303))
# get the page now that we are logged in
self.getPage('/', self.cookies)
self.assertBody('Hi test, you are logged in')
# do a logout
self.getPage('/do_logout', self.cookies, method='POST')
self.assertStatus((302, 303))
# verify we are logged out
self.getPage('/', self.cookies)
self.assertInBody('<form method="post" action="do_login">')
| 2.453125 | 2 |
2021/day-12/solve.py | amochtar/adventofcode | 1 | 6643 | #!/usr/bin/env python
from typing import List
import aoc
from collections import defaultdict
@aoc.timing
def solve(inp: str, part2=False):
def find_path(current: str, path: List[str] = []):
if current == 'end':
yield path
return
for nxt in caves[current]:
if nxt == 'start':
continue
            if nxt.islower() and nxt in path:
                # A small cave already on the path may be revisited only in
                # part 2, and then only if no small cave has been used twice yet.
                if not part2:
                    continue
                elif any(path.count(c) > 1 for c in path if c.islower()):
                    continue
yield from find_path(nxt, path + [nxt])
caves = defaultdict(list)
for line in inp.splitlines():
parts = line.split('-')
caves[parts[0]].append(parts[1])
caves[parts[1]].append(parts[0])
return len(list(find_path('start')))
@aoc.timing
def part2(inp: str):
return inp
with open('test2.txt', 'r') as f:
inp = f.read()
print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))
with open('input.txt', 'r') as f:
inp = f.read()
print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))
| 3.4375 | 3 |
PaddleCV/tracking/ltr/data/processing.py | suytingwan/models | 5 | 6644 | <gh_stars>1-10
import numpy as np
from ltr.data import transforms
import ltr.data.processing_utils as prutils
from pytracking.libs import TensorDict
class BaseProcessing:
""" Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it
through the network. For example, it can be used to crop a search region around the object, apply various data
augmentations, etc."""
def __init__(self,
transform=transforms.ToArray(),
train_transform=None,
test_transform=None,
joint_transform=None):
"""
args:
transform - The set of transformations to be applied on the images. Used only if train_transform or
test_transform is None.
train_transform - The set of transformations to be applied on the train images. If None, the 'transform'
argument is used instead.
test_transform - The set of transformations to be applied on the test images. If None, the 'transform'
argument is used instead.
joint_transform - The set of transformations to be applied 'jointly' on the train and test images. For
example, it can be used to convert both test and train images to grayscale.
"""
self.transform = {
'train': transform if train_transform is None else train_transform,
'test': transform if test_transform is None else test_transform,
'joint': joint_transform
}
def __call__(self, data: TensorDict):
raise NotImplementedError
class SiamFCProcessing(BaseProcessing):
def __init__(self,
search_area_factor,
output_sz,
center_jitter_factor,
scale_jitter_factor,
mode='pair',
scale_type='context',
border_type='meanpad',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.search_area_factor = search_area_factor
self.output_sz = output_sz
self.center_jitter_factor = center_jitter_factor
self.scale_jitter_factor = scale_jitter_factor
self.mode = mode
self.scale_type = scale_type
self.border_type = border_type
def _get_jittered_box(self, box, mode, rng):
jittered_size = box[2:4] * np.exp(
rng.randn(2) * self.scale_jitter_factor[mode])
max_offset = (np.sqrt(jittered_size.prod()) *
self.center_jitter_factor[mode])
jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (rng.rand(2)
- 0.5)
return np.concatenate(
(jittered_center - 0.5 * jittered_size, jittered_size), axis=0)
def __call__(self, data: TensorDict, rng=None):
# Apply joint transforms
if self.transform['joint'] is not None:
num_train_images = len(data['train_images'])
all_images = data['train_images'] + data['test_images']
all_images_trans = self.transform['joint'](*all_images)
data['train_images'] = all_images_trans[:num_train_images]
data['test_images'] = all_images_trans[num_train_images:]
for s in ['train', 'test']:
assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
"In pair mode, num train/test frames must be 1"
# Add a uniform noise to the center pos
jittered_anno = [
self._get_jittered_box(a, s, rng) for a in data[s + '_anno']
]
# Crop image region centered at jittered_anno box
try:
crops, boxes = prutils.jittered_center_crop(
data[s + '_images'],
jittered_anno,
data[s + '_anno'],
self.search_area_factor[s],
self.output_sz[s],
scale_type=self.scale_type,
border_type=self.border_type)
except Exception as e:
print('{}, anno: {}'.format(data['dataset'], data[s + '_anno']))
raise e
# Apply transforms
data[s + '_images'] = [self.transform[s](x) for x in crops]
data[s + '_anno'] = boxes
# Prepare output
if self.mode == 'sequence':
data = data.apply(prutils.stack_tensors)
else:
data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
return data
class ATOMProcessing(BaseProcessing):
""" The processing class used for training ATOM. The images are processed in the following way.
    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region)
centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is
cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is
always at the center of the search region. The search region is then resized to a fixed size given by the
argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.
"""
def __init__(self,
search_area_factor,
output_sz,
center_jitter_factor,
scale_jitter_factor,
proposal_params,
mode='pair',
*args,
**kwargs):
"""
args:
search_area_factor - The size of the search region relative to the target size.
output_sz - An integer, denoting the size to which the search region is resized. The search region is always
square.
center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before
extracting the search region. See _get_jittered_box for how the jittering is done.
scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before
extracting the search region. See _get_jittered_box for how the jittering is done.
proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.
mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames
"""
super().__init__(*args, **kwargs)
self.search_area_factor = search_area_factor
self.output_sz = output_sz
self.center_jitter_factor = center_jitter_factor
self.scale_jitter_factor = scale_jitter_factor
self.proposal_params = proposal_params
self.mode = mode
def _get_jittered_box(self, box, mode, rng):
""" Jitter the input box
args:
box - input bounding box
mode - string 'train' or 'test' indicating train or test data
returns:
Variable - jittered box
"""
jittered_size = box[2:4] * np.exp(
rng.randn(2) * self.scale_jitter_factor[mode])
max_offset = (np.sqrt(jittered_size.prod()) *
self.center_jitter_factor[mode])
jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (rng.rand(2)
- 0.5)
return np.concatenate(
(jittered_center - 0.5 * jittered_size, jittered_size), axis=0)
def _generate_proposals(self, box, rng):
""" Generates proposals by adding noise to the input box
args:
box - input box
returns:
array - Array of shape (num_proposals, 4) containing proposals
array - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The
IoU is mapped to [-1, 1]
"""
# Generate proposals
num_proposals = self.proposal_params['boxes_per_frame']
proposals = np.zeros((num_proposals, 4))
gt_iou = np.zeros(num_proposals)
for i in range(num_proposals):
proposals[i, :], gt_iou[i] = prutils.perturb_box(
box,
min_iou=self.proposal_params['min_iou'],
sigma_factor=self.proposal_params['sigma_factor'],
rng=rng)
# Map to [-1, 1]
gt_iou = gt_iou * 2 - 1
return proposals, gt_iou
def __call__(self, data: TensorDict, rng=None):
"""
args:
data - The input data, should contain the following fields:
'train_images' -
'test_images' -
'train_anno' -
'test_anno' -
returns:
TensorDict - output data block with following fields:
'train_images' -
'test_images' -
'train_anno' -
'test_anno' -
'test_proposals'-
'proposal_iou' -
"""
# Apply joint transforms
if self.transform['joint'] is not None:
num_train_images = len(data['train_images'])
all_images = data['train_images'] + data['test_images']
all_images_trans = self.transform['joint'](*all_images)
data['train_images'] = all_images_trans[:num_train_images]
data['test_images'] = all_images_trans[num_train_images:]
for s in ['train', 'test']:
assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \
"In pair mode, num train/test frames must be 1"
# Add a uniform noise to the center pos
jittered_anno = [
self._get_jittered_box(a, s, rng) for a in data[s + '_anno']
]
# Crop image region centered at jittered_anno box
try:
crops, boxes = prutils.jittered_center_crop(
data[s + '_images'], jittered_anno, data[s + '_anno'],
self.search_area_factor, self.output_sz)
except Exception as e:
print('{}, anno: {}'.format(data['dataset'], data[s + '_anno']))
raise e
# Apply transforms
data[s + '_images'] = [self.transform[s](x) for x in crops]
data[s + '_anno'] = boxes
# Generate proposals
frame2_proposals, gt_iou = zip(
* [self._generate_proposals(a, rng) for a in data['test_anno']])
data['test_proposals'] = list(frame2_proposals)
data['proposal_iou'] = list(gt_iou)
# Prepare output
if self.mode == 'sequence':
data = data.apply(prutils.stack_tensors)
else:
data = data.apply(lambda x: x[0] if isinstance(x, list) else x)
return data
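# Illustrative construction (sketch only; the numeric values below are
# assumptions loosely modelled on common ATOM settings, not taken from this
# repository's configs).
if __name__ == '__main__':
    processing = ATOMProcessing(
        search_area_factor=5.0,
        output_sz=288,
        center_jitter_factor={'train': 0, 'test': 4.5},
        scale_jitter_factor={'train': 0, 'test': 0.5},
        proposal_params={'boxes_per_frame': 16, 'min_iou': 0.1,
                         'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]},
        mode='sequence')
    print(type(processing).__name__, 'constructed')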
| 3.09375 | 3 |
tqcli/config/config.py | Tranquant/tqcli | 0 | 6645 | <reponame>Tranquant/tqcli
import logging
from os.path import expanduser
#TQ_API_ROOT_URL = 'http://127.0.1.1:8090/dataset'
TQ_API_ROOT_URL = 'http://elb-tranquant-ecs-cluster-tqapi-1919110681.us-west-2.elb.amazonaws.com/dataset'
LOG_PATH = expanduser('~/tqcli.log')
# the chunk size must be at least 5MB for multipart upload
DEFAULT_CHUNK_SIZE = 1024 * 1024 * 5 # 5MB
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=LOG_PATH,
filemode='w'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
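# Illustrative check (not part of the original module): running this file
# directly emits one record through the handlers configured above.
if __name__ == '__main__':
    logging.getLogger(__name__).info('tqcli logging configured; log file at %s', LOG_PATH)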
| 2.21875 | 2 |
fqf_iqn_qrdqn/agent/base_agent.py | rainwangphy/fqf-iqn-qrdqn.pytorch | 0 | 6646 | <gh_stars>0
from abc import ABC, abstractmethod
import os
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from fqf_iqn_qrdqn.memory import LazyMultiStepMemory, \
LazyPrioritizedMultiStepMemory
from fqf_iqn_qrdqn.utils import RunningMeanStats, LinearAnneaer
class BaseAgent(ABC):
def __init__(self, env, test_env, log_dir, num_steps=5 * (10 ** 7),
batch_size=32, memory_size=10 ** 6, gamma=0.99, multi_step=1,
update_interval=4, target_update_interval=10000,
start_steps=50000, epsilon_train=0.01, epsilon_eval=0.001,
epsilon_decay_steps=250000, double_q_learning=False,
dueling_net=False, noisy_net=False, use_per=False,
log_interval=100, eval_interval=250000, num_eval_steps=125000,
max_episode_steps=27000, grad_cliping=5.0, cuda=True, seed=0):
self.env = env
self.test_env = test_env
torch.manual_seed(seed)
np.random.seed(seed)
self.env.seed(seed)
self.test_env.seed(2 ** 31 - 1 - seed)
        # torch.backends.cudnn.deterministic = True  # It harms performance.
        # torch.backends.cudnn.benchmark = False  # It harms performance.
self.device = torch.device(
"cuda" if cuda and torch.cuda.is_available() else "cpu")
self.online_net = None
self.target_net = None
# Replay memory which is memory-efficient to store stacked frames.
if use_per:
beta_steps = (num_steps - start_steps) / update_interval
self.memory = LazyPrioritizedMultiStepMemory(
memory_size, self.env.observation_space.shape,
self.device, gamma, multi_step, beta_steps=beta_steps)
else:
self.memory = LazyMultiStepMemory(
memory_size, self.env.observation_space.shape,
self.device, gamma, multi_step)
self.log_dir = log_dir
self.model_dir = os.path.join(log_dir, 'model')
self.summary_dir = os.path.join(log_dir, 'summary')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
if not os.path.exists(self.summary_dir):
os.makedirs(self.summary_dir)
self.writer = SummaryWriter(log_dir=self.summary_dir)
self.train_return = RunningMeanStats(log_interval)
self.steps = 0
self.learning_steps = 0
self.episodes = 0
self.best_eval_score = -np.inf
self.num_actions = self.env.action_space.n
self.num_steps = num_steps
self.batch_size = batch_size
self.double_q_learning = double_q_learning
self.dueling_net = dueling_net
self.noisy_net = noisy_net
self.use_per = use_per
self.log_interval = log_interval
self.eval_interval = eval_interval
self.num_eval_steps = num_eval_steps
self.gamma_n = gamma ** multi_step
self.start_steps = start_steps
self.epsilon_train = LinearAnneaer(
1.0, epsilon_train, epsilon_decay_steps)
self.epsilon_eval = epsilon_eval
self.update_interval = update_interval
self.target_update_interval = target_update_interval
self.max_episode_steps = max_episode_steps
self.grad_cliping = grad_cliping
def run(self):
while True:
self.train_episode()
if self.steps > self.num_steps:
break
def is_update(self):
return self.steps % self.update_interval == 0 \
and self.steps >= self.start_steps
def is_random(self, eval=False):
# Use e-greedy for evaluation.
if self.steps < self.start_steps:
return True
if eval:
return np.random.rand() < self.epsilon_eval
if self.noisy_net:
return False
return np.random.rand() < self.epsilon_train.get()
def update_target(self):
self.target_net.load_state_dict(
self.online_net.state_dict())
def explore(self):
# Act with randomness.
action = self.env.action_space.sample()
return action
def exploit(self, state):
# Act without randomness.
state = torch.ByteTensor(
state).unsqueeze(0).to(self.device).float() / 255.
with torch.no_grad():
action = self.online_net.calculate_q(states=state).argmax().item()
return action
@abstractmethod
def learn(self):
pass
def save_models(self, save_dir):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(
self.online_net.state_dict(),
os.path.join(save_dir, 'online_net.pth'))
torch.save(
self.target_net.state_dict(),
os.path.join(save_dir, 'target_net.pth'))
def load_models(self, save_dir):
self.online_net.load_state_dict(torch.load(
os.path.join(save_dir, 'online_net.pth')))
self.target_net.load_state_dict(torch.load(
os.path.join(save_dir, 'target_net.pth')))
def train_episode(self):
self.online_net.train()
self.target_net.train()
self.episodes += 1
episode_return = 0.
episode_steps = 0
done = False
state = self.env.reset()
while (not done) and episode_steps <= self.max_episode_steps:
# NOTE: Noises can be sampled only after self.learn(). However, I
            # sample noises before every action, which seems to lead to better
            # performance.
self.online_net.sample_noise()
if self.is_random(eval=False):
action = self.explore()
else:
action = self.exploit(state)
next_state, reward, done, _ = self.env.step(action)
# To calculate efficiently, I just set priority=max_priority here.
self.memory.append(state, action, reward, next_state, done)
self.steps += 1
episode_steps += 1
episode_return += reward
state = next_state
self.train_step_interval()
# We log running mean of stats.
self.train_return.append(episode_return)
# We log evaluation results along with training frames = 4 * steps.
if self.episodes % self.log_interval == 0:
self.writer.add_scalar(
'return/train', self.train_return.get(), 4 * self.steps)
print(f'Episode: {self.episodes:<4} '
f'episode steps: {episode_steps:<4} '
f'return: {episode_return:<5.1f}')
def train_step_interval(self):
self.epsilon_train.step()
if self.steps % self.target_update_interval == 0:
self.update_target()
if self.is_update():
self.learn()
if self.steps % self.eval_interval == 0:
self.evaluate()
self.save_models(os.path.join(self.model_dir, 'final'))
self.online_net.train()
def evaluate(self):
self.online_net.eval()
num_episodes = 0
num_steps = 0
total_return = 0.0
while True:
state = self.test_env.reset()
episode_steps = 0
episode_return = 0.0
done = False
while (not done) and episode_steps <= self.max_episode_steps:
if self.is_random(eval=True):
action = self.explore()
else:
action = self.exploit(state)
next_state, reward, done, _ = self.test_env.step(action)
num_steps += 1
episode_steps += 1
episode_return += reward
state = next_state
num_episodes += 1
total_return += episode_return
if num_steps > self.num_eval_steps:
break
mean_return = total_return / num_episodes
if mean_return > self.best_eval_score:
self.best_eval_score = mean_return
self.save_models(os.path.join(self.model_dir, 'best'))
# We log evaluation results along with training frames = 4 * steps.
self.writer.add_scalar(
'return/test', mean_return, 4 * self.steps)
print('-' * 60)
print(f'Num steps: {self.steps:<5} '
f'return: {mean_return:<5.1f}')
print('-' * 60)
def __del__(self):
self.env.close()
self.test_env.close()
self.writer.close()
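# Minimal sketch (not part of the original file): a concrete agent only needs
# to implement learn(); the body below is a placeholder, not a real update rule.
class _DummyAgent(BaseAgent):
    def learn(self):
        # A real implementation would sample a batch from self.memory, compute
        # the loss for self.online_net and step its optimizer here.
        self.learning_steps += 1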
| 2.046875 | 2 |
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/italian/timeperiod_extractor_config.py | felaray/Recognizers-Text | 0 | 6647 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Pattern
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.italian.extractors import ItalianIntegerExtractor
from ...resources.italian_date_time import ItalianDateTime
from ..extractors import DateTimeExtractor
from ..base_timeperiod import TimePeriodExtractorConfiguration, MatchedIndex
from ..base_time import BaseTimeExtractor
from ..base_timezone import BaseTimeZoneExtractor
from .time_extractor_config import ItalianTimeExtractorConfiguration
from .base_configs import ItalianDateTimeUtilityConfiguration
from .timezone_extractor_config import ItalianTimeZoneExtractorConfiguration
class ItalianTimePeriodExtractorConfiguration(TimePeriodExtractorConfiguration):
@property
def check_both_before_after(self) -> bool:
return self._check_both_before_after
@property
def simple_cases_regex(self) -> List[Pattern]:
return self._simple_cases_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def time_of_day_regex(self) -> Pattern:
return self._time_of_day_regex
@property
def general_ending_regex(self) -> Pattern:
return self._general_ending_regex
@property
def single_time_extractor(self) -> DateTimeExtractor:
return self._single_time_extractor
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def token_before_date(self) -> str:
return self._token_before_date
@property
def pure_number_regex(self) -> List[Pattern]:
return self._pure_number_regex
@property
def time_zone_extractor(self) -> DateTimeExtractor:
return self._time_zone_extractor
def __init__(self):
super().__init__()
self._check_both_before_after = ItalianDateTime.CheckBothBeforeAfter
self._single_time_extractor = BaseTimeExtractor(
ItalianTimeExtractorConfiguration())
self._integer_extractor = ItalianIntegerExtractor()
self.utility_configuration = ItalianDateTimeUtilityConfiguration()
self._simple_cases_regex: List[Pattern] = [
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumFromTo),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumBetweenAnd),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PmRegex),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.AmRegex)
]
self._till_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TillRegex)
self._time_of_day_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TimeOfDayRegex)
self._general_ending_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.GeneralEndingRegex)
self.from_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.FromRegex2)
self.connector_and_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.ConnectorAndRegex)
self.before_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.BeforeRegex2)
self._token_before_date = ItalianDateTime.TokenBeforeDate
self._pure_number_regex = [ItalianDateTime.PureNumFromTo, ItalianDateTime.PureNumFromTo]
self._time_zone_extractor = BaseTimeZoneExtractor(
ItalianTimeZoneExtractorConfiguration())
def get_from_token_index(self, source: str) -> MatchedIndex:
match = self.from_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def get_between_token_index(self, source: str) -> MatchedIndex:
match = self.before_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def is_connector_token(self, source: str):
return self.connector_and_regex.match(source)
| 2.25 | 2 |
quartet_condor.py | BotanyHunter/QuartetAnalysis | 0 | 6648 | <reponame>BotanyHunter/QuartetAnalysis
#quartet_condor.py
#version 2.0.2
import random, sys
def addToDict(d):
'''
Ensures each quartet has three concordance factors (CFs)
a dictionary d has less than three CFs, add CFs with the value 0 until there are three
Input: a dictionary containing CFs, a counter of how many CFs are in the dictionary
'''
if ("{1,2|3,4}" not in d):
d["{1,2|3,4}"] = 0.0
if ("{1,3|2,4}" not in d):
d["{1,3|2,4}"] = 0.0
if ("{1,4|2,3}" not in d):
d["{1,4|2,3}"] = 0.0
class quartet:
'''
Picks individual quartets and isolates concordance factors
'''
def __init__(self):
#length of a split in *.concordance file
self.length_of_splits = 10
self.quartet_length = 4
#list to hold the 4 taxa
self.taxa = []
#dictionaries to hold cfs with splits
self.d = {}
self.d2 = {}
def pick_random_quartet(self, ntax):
'''
Randomly select the 4 taxa to be included in the quartet analysis
:Input: The total number of taxa in an analysis
:Return: A sorted list of 4 unique taxa
'''
self.taxa = []
while len(self.taxa) < self.quartet_length:
num = random.randint(0, ntax-1)
if num not in self.taxa:
self.taxa.append(num)
self.taxa = sorted(self.taxa)
#return a sorted list of 4 random taxa
return self.taxa
def isolateCFs(self, file, num_genes):
'''
        Isolates the CFs within a *.concordance file
        :Input: An open *.concordance file and the total number of genes
        :Return: Three dictionaries keyed by split: the CFs and the lower/upper bounds of their 95% CIs
'''
self.d = {}
self.ciLow = {}
self.ciHigh = {}
split = ""
cf = 0
#counter to ensure 3 entries
counter = 0
for line in file:
#finds all splits, which have CFs associated with them
if (line[0] == '{' and len(line) == self.length_of_splits):
split = line
#find CF associated with the split found above
if (line.startswith('mean')):
words = line.split()
                #CF guaranteed by BUCKy to be the 4th "word"
cf = float(words[3])
#add split/CF pair to dictionary
self.d[split.strip()] = cf
counter += 1
if( line.startswith('95% CI for CF')):
useline = line.translate(None,"()")
useline = useline.replace(","," ")
words = useline.split()
self.ciLow[split.strip()] = float(words[5]) / num_genes
self.ciHigh[split.strip()] = float(words[6]) / num_genes
#fill out dictionary if there were less than 3 splits
if (counter < 3):
addToDict(self.d)
addToDict(self.ciLow)
addToDict(self.ciHigh)
return self.d, self.ciLow, self.ciHigh
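#illustrative usage (not in the original script): draw one random quartet from
#a hypothetical 10-taxon analysis. isolateCFs() additionally needs an open
#*.concordance file and the number of genes, so it is not exercised here.
if __name__ == '__main__':
    q = quartet()
    print(q.pick_random_quartet(10))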
| 3.234375 | 3 |
src/profiles/forms.py | rahulroshan96/CloudVisual | 0 | 6649 | <gh_stars>0
from django import forms
from models import UserInputModel
class UserInputForm(forms.ModelForm):
class Meta:
model = UserInputModel
fields = ['user_input'] | 1.960938 | 2 |
tests_oval_graph/test_arf_xml_parser/test_arf_xml_parser.py | Honny1/oval-graph | 21 | 6650 | from pathlib import Path
import pytest
from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser
def get_arf_report_path(src="global_test_data/ssg-fedora-ds-arf.xml"):
return str(Path(__file__).parent.parent / src)
@pytest.mark.parametrize("rule_id, result", [
(
"xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny",
"false",
),
(
"xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth",
"false",
),
(
"xccdf_org.ssgproject.content_rule_service_debug-shell_disabled",
"true",
),
(
"xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir",
"false",
),
(
"xccdf_org.ssgproject.content_rule_require_singleuser_auth",
"true",
),
])
def test_parsing_and_evaluate_scan_rule(rule_id, result):
path = get_arf_report_path()
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == result
def test_parsing_arf_report_without_system_data():
path = get_arf_report_path("global_test_data/arf_no_system_data.xml")
rule_id = "xccdf_com.example.www_rule_test-fail"
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == "false"
@pytest.mark.parametrize("rule_id, pattern", [
("hello", "404 rule \"hello\" not found!"),
("xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server", "notselected"),
("xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy", "notchecked"),
("xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages", "notapplicable"),
])
def test_parsing_bad_rule(rule_id, pattern):
path = get_arf_report_path()
parser = ARFXMLParser(path)
with pytest.raises(Exception, match=pattern):
assert parser.get_oval_tree(rule_id)
def test_use_bad_report_file():
src = 'global_test_data/xccdf_org.ssgproject.content_profile_ospp-results-initial.xml'
path = get_arf_report_path(src)
with pytest.raises(Exception, match=r"arf\b|ARF\b"):
assert ARFXMLParser(path)
| 2.15625 | 2 |
main.py | scottjr632/trump-twitter-bot | 0 | 6651 | import os
import logging
import argparse
import sys
import signal
import subprocess
from functools import wraps
from dotenv import load_dotenv
load_dotenv(verbose=True)
from app.config import configure_app
from app.bot import TrumpBotScheduler
from app.sentimentbot import SentimentBot
parser = argparse.ArgumentParser(description=r"""
""")
ROOT = os.getcwd()
PID_FILE_PATH = os.path.join(ROOT, 'var/run-dev.pid')
CMDS = []
FNCS = []
try:
os.setpgrp()
if not os.path.exists(os.path.dirname(PID_FILE_PATH)):
os.makedirs(os.path.dirname(PID_FILE_PATH))
with open(PID_FILE_PATH, 'w+') as file:
file.write(str(os.getpgrp()) + '\n')
except Exception as e:
logging.error(e)
def _file_path_sanity_check(*args):
for path in args:
if not os.path.exists(path):
raise Exception('Unable to find file %s' % path)
def _start_client_server(*args, **kwargs):
cmd = [
'npm', '--prefix', '%s/client' % ROOT, 'run', 'start'
]
CMDS.append(cmd)
def inject_file_paths(fn):
requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
_file_path_sanity_check(requests_path, auth_path)
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(requests_path=requests_path, auth_path=auth_path, *args, **kwargs)
return wrapper
@inject_file_paths
def _initialize_trump_bot(auth_path, requests_path,
send_posts: bool=True,
*args, **kwargs) -> TrumpBotScheduler:
trump_bot: TrumpBotScheduler = None
    # NOTE: argparse maps the --no-post flag onto dest='send_posts', so a truthy
    # value here means post requests should NOT be sent and the override applies.
    if send_posts:
        logging.info('Post requests are not being sent.')
class PostOverride(TrumpBotScheduler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content, headers=None):
return 200
trump_bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
trump_bot = TrumpBotScheduler(file_path=requests_path, auth_file_path=auth_path)
# this functions initialize the trump bot by getting the latest tweets
# and trying to send any tweets that contained errors
trump_bot.send_latest_tweets()
trump_bot.resend_bad_tweets()
logging.info('Trump bot initialization finished... please press ctrl-c to close program if finished.')
return trump_bot
@inject_file_paths
def _start_sentiment_bot(auth_path: str, requests_path: str,
trump_bot: TrumpBotScheduler,
send_posts: bool=True) -> SentimentBot:
bot: SentimentBot = None
if send_posts:
logging.info('Sentiment bot is not running')
class PostOverride(SentimentBot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content) -> int:
return 200
bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
bot = SentimentBot(auth_file_path=auth_path, file_path=requests_path)
trump_bot.add_job(bot.send_todays_tone, 'interval', hours=24, max_instances=1)
return bot
def _start_flask_server(*args, **kwargs):
from app import app
logging.info('Starting the flask server...')
level = os.environ.get('CONFIG_LEVEL')
configure_app(app, status='production' if level is None else level)
port = app.config.get('PORT')
app.run(host='0.0.0.0', port=port)
def _start_dev_server(*args, **kwargs):
_start_client_server()
FNCS.append(_start_flask_server)
def _start_prod_server(*args, **kwargs):
_start_trump_bot(*args, **kwargs)
_start_flask_server(*args, **kwargs)
def _start_trump_bot(send_posts=True, start_sentiment_bot=False, *args, **kwargs):
logging.info('Starting the trump bot...')
# requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
# auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
# _file_path_sanity_check(requests_path, auth_path)
bot = _initialize_trump_bot(send_posts=send_posts)
if not start_sentiment_bot:
_start_sentiment_bot(trump_bot=bot, send_posts=send_posts)
bot.start()
ACTIONS = {
"initialize": _initialize_trump_bot,
"client": _start_client_server,
"trumpbot": _start_trump_bot,
"flask": _start_flask_server,
"dev": _start_dev_server,
"prod": _start_prod_server,
}
parser.add_argument('action',
help='start the Flask app',
type=str,
choices=[key for key, v in ACTIONS.items()])
parser.add_argument('-np', '--no-post',
dest='send_posts',
action='store_true',
help='Do not send post requests')
parser.add_argument('-nsb', '--no-sentiment-bot',
dest='start_sentiment_bot',
action='store_true',
                    help='Do not start the sentiment bot')
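# Example invocations (illustrative):
#   python main.py prod           # trump bot + flask server, posting enabled
#   python main.py trumpbot -np   # bot only, post requests suppressed
#   python main.py dev            # npm client + flask dev server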
def signal_handler(sig, frame):
os.killpg(0, signal.SIGTERM)
os.remove(PID_FILE_PATH)
sys.exit(0)
def main():
options = parser.parse_args()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, signal_handler)
ACTIONS.get(options.action)(**options.__dict__)
env = os.environ.copy()
for cmd in CMDS:
subprocess.Popen(cmd, env=env)
for fn in FNCS:
subprocess.Popen(fn(), env=env)
signal.pause()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| 2.140625 | 2 |
010-summation-of-primes.py | dendi239/euler | 0 | 6652 | #! /usr/bin/env python3
import itertools
import typing as tp
def primes() -> tp.Generator[int, None, None]:
primes_ = []
d = 2
while True:
is_prime = True
for p in primes_:
if p * p > d:
break
if d % p == 0:
is_prime = False
break
if is_prime:
primes_.append(d)
yield d
d += 1
def sum_primes_below(n: int) -> int:
return sum(itertools.takewhile(lambda x: x < n, primes()))
def test_ten() -> None:
assert sum_primes_below(10) == 17
def main() -> None:
print(sum_primes_below(2_000_000))
if __name__ == '__main__':
main()
| 3.9375 | 4 |
setup.py | letmaik/lensfunpy | 94 | 6653 | from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
_ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True)
_ask_pkg_config(extra_compile_args, '--cflags-only-other')
_ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True)
_ask_pkg_config(extra_link_args, '--libs-only-other')
_ask_pkg_config(libraries, '--libs-only-l', '-l')
if isWindows or isMac:
cmake_build = os.path.abspath('external/lensfun/build')
install_dir = os.path.join(cmake_build, 'install')
include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
library_dirs += [os.path.join(install_dir, 'lib')]
else:
use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
if not os.path.exists('external/lensfun/README.md'):
print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
if os.system('git submodule update --init') != 0:
raise Exception('git failed')
def windows_lensfun_compile():
clone_submodules()
cwd = os.getcwd()
# Download cmake to build lensfun
cmake_version = '3.13.4'
cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
# Download vcpkg to build dependencies of lensfun
vcpkg_commit = '2021.05.12'
vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
files = [(cmake_url, 'external', cmake),
(vcpkg_url, 'external', vcpkg_bootstrap)]
for url, extractdir, extractcheck in files:
if not os.path.exists(extractcheck):
path = 'external/' + os.path.basename(url)
if not os.path.exists(path):
print('Downloading', url)
try:
urlretrieve(url, path)
except:
# repeat once in case of network issues
urlretrieve(url, path)
with zipfile.ZipFile(path) as z:
print('Extracting', path, 'into', extractdir)
z.extractall(extractdir)
if not os.path.exists(path):
raise RuntimeError(path + ' not found!')
# Bootstrap vcpkg
os.chdir(vcpkg_dir)
if not os.path.exists(vcpkg):
code = os.system(vcpkg_bootstrap)
if code != 0:
sys.exit(code)
# lensfun depends on glib2, so let's build it with vcpkg
vcpkg_arch = 'x64' if is64Bit else 'x86'
vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
if code != 0:
sys.exit(code)
vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
# bundle runtime dlls
vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
# configure and compile lensfun
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
# temporary hack to avoid https://stackoverflow.com/a/53547931
# (python module not needed here anyway)
patch_path = '../apps/CMakeLists.txt'
with open(patch_path) as f:
content = f.read()
content = content.replace('IF(PYTHON)', 'IF(FALSE)')
with open(patch_path, 'w') as f:
f.write(content)
cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
'-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
cmake + ' --build .',
cmake + ' --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
('glib-2.0-0.dll', vcpkg_bin_dir),
# dependencies of glib
('pcre.dll', vcpkg_bin_dir),
('iconv-2.dll', vcpkg_bin_dir),
('charset-1.dll', vcpkg_bin_dir),
('intl-8.dll', vcpkg_bin_dir),
]
for filename, folder in dll_runtime_libs:
src = os.path.join(folder, filename)
dest = 'lensfunpy/' + filename
print('copying', src, '->', dest)
shutil.copyfile(src, dest)
def mac_lensfun_compile():
clone_submodules()
# configure and compile lensfun
cwd = os.getcwd()
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
install_name_dir = os.path.join(install_dir, 'lib')
cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_INSTALL_PREFIX=install ' +\
'-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir,
'cmake --build .',
'cmake --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
def bundle_db_files():
import glob
db_files = 'lensfunpy/db_files'
if not os.path.exists(db_files):
os.makedirs(db_files)
for path in glob.glob('external/lensfun/data/db/*.xml'):
dest = os.path.join(db_files, os.path.basename(path))
print('copying', path, '->', dest)
shutil.copyfile(path, dest)
package_data = {'lensfunpy': []}
# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
windows_lensfun_compile()
package_data['lensfunpy'].append('*.dll')
elif isMac and needsCompile:
mac_lensfun_compile()
if any(s in cmdline for s in ['clean', 'sdist']):
# When running sdist after a previous run of bdist or build_ext
# then even with the 'clean' command the .egg-info folder stays.
# This folder contains SOURCES.txt which in turn is used by sdist
# to include package data files, but we don't want .dll's and .xml
# files in our source distribution. Therefore, to prevent accidents,
# we help a little...
egg_info = 'lensfunpy.egg-info'
print('removing', egg_info)
shutil.rmtree(egg_info, ignore_errors=True)
if 'sdist' not in cmdline:
# This assumes that the lensfun version from external/lensfun was used.
# If that's not the case, the bundled files may fail to load, for example,
# if lensfunpy was linked against an older lensfun version already on
# the system (Linux mostly) and the database format changed in an incompatible way.
# In that case, loading of bundled files can still be disabled
# with Database(load_bundled=False).
package_data['lensfunpy'].append('db_files/*.xml')
bundle_db_files()
# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
compdirectives['linetrace'] = True
macros.append(('CYTHON_TRACE', '1'))
extensions = cythonize([Extension("lensfunpy._lensfun",
include_dirs=include_dirs,
sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
libraries=libraries,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=macros
)],
compiler_directives=compdirectives)
# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())
setup(
name = 'lensfunpy',
version = __version__,
description = 'Lens distortion correction for Python, a wrapper for lensfun',
long_description = open('README.rst').read(),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/letmaik/lensfunpy',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries',
],
packages = find_packages(),
ext_modules = extensions,
package_data = package_data,
install_requires=['numpy']
)
| 1.78125 | 2 |
chapter_13/mailtools/__init__.py | bimri/programming_python | 0 | 6654 | <gh_stars>0
"The mailtools Utility Package"
'Initialization File'
"""
##################################################################################
mailtools package: interface to mail server transfers, used by pymail2, PyMailGUI,
and PyMailCGI; does loads, sends, parsing, composing, and deleting, with part
attachments, encodings (of both the email and Unicode kind), etc.; the parser,
fetcher, and sender classes here are designed to be mixed-in to subclasses which
use their methods, or used as embedded or standalone objects;
this package also includes convenience subclasses for silent mode, and more;
loads all mail text if pop server doesn't do top; doesn't handle threads or UI
here, and allows askPassword to differ per subclass; progress callback funcs get
status; all calls raise exceptions on error--client must handle in GUI/other;
this changed from file to package: nested modules imported here for bw compat;
4E: need to use package-relative import syntax throughout, because in Py 3.X
package dir is no longer on the module import search path if the package is imported
elsewhere (from another directory which uses this package); also performs
Unicode decoding on mail text when fetched (see mailFetcher), as well as for
some text part payloads which might have been email-encoded (see mailParser);
TBD: in saveparts, should file be opened in text mode for text/ contypes?
TBD: in walkNamedParts, should we skip oddballs like message/delivery-status?
TBD: Unicode support has not been tested exhaustively: see Chapter 13 for more
on the Py3.1 email package and its limitations, and the policies used here;
##################################################################################
"""
# collect contents of all modules here, when package dir imported directly
from .mailFetcher import *
from .mailSender import * # 4E: package-relative
from .mailParser import *
# export nested modules here, when from mailtools import *
__all__ = 'mailFetcher', 'mailSender', 'mailParser'
# self-test code is in selftest.py to allow mailconfig's path
# to be set before running the nested module imports above
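
# Illustrative usage sketch: client code can rely on the wildcard re-exports
# above or import the nested modules named in __all__ directly. The concrete
# fetcher/sender/parser class names live in those nested modules, so only the
# module names are shown here:
#
#   import mailtools
#   from mailtools import mailFetcher, mailSender, mailParser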
| 1.429688 | 1 |
TreeModelLib/BelowgroundCompetition/__init__.py | jvollhueter/pyMANGA-1 | 0 | 6655 | <reponame>jvollhueter/pyMANGA-1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018
@author: bathmann
"""
from .BelowgroundCompetition import BelowgroundCompetition
from .SimpleTest import SimpleTest
from .FON import FON
from .OGSWithoutFeedback import OGSWithoutFeedback
from .OGSLargeScale3D import OGSLargeScale3D
from .OGS.helpers import CellInformation
from .FixedSalinity import FixedSalinity
| 1.03125 | 1 |
server.py | SDelhey/websocket-chat | 0 | 6656 | <reponame>SDelhey/websocket-chat
from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
if __name__ == '__main__':
socketio.run(app) | 2.109375 | 2 |
services/postprocess/src/postprocess.py | hadarohana/myCosmos | 0 | 6657 | """
Post processing on detected objects
"""
import pymongo
from pymongo import MongoClient
import time
import logging
logging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG)
from joblib import Parallel, delayed
import click
from xgboost_model.inference import run_inference, PostprocessException
import os
def load_detected_pages(db, buffer_size):
"""
"""
current_docs = []
for doc in db.propose_pages.find({'postprocess': None, 'ocr': True}, no_cursor_timeout=True):
current_docs.append(doc)
if len(current_docs) == buffer_size:
yield current_docs
current_docs = []
yield current_docs
def do_skip(page, client):
db = client.pdfs
coll = db.postprocess_pages
return coll.count_documents({'pdf_name': page['pdf_name'], 'page_num': page['page_num']}, limit=1) != 0
def postprocess(db_insert_fn, num_processes, weights_pth, skip):
logging.info('Starting post-processing over detected objects')
start_time = time.time()
client = MongoClient(os.environ["DBCONNECT"])
logging.info(f'Connected to client: {client}.')
db = client.pdfs
for batch in load_detected_pages(db, 100):
logging.info('Loaded next batch. Running postprocessing')
try:
pages = Parallel(n_jobs=num_processes)(delayed(run_inference)(page, weights_pth) for page in batch)
except PostprocessException as e:
logging.error(f'Postprocessing error in referenced page: {e.page}')
logging.error(f'Original Exception: {e.original_exception}')
continue
db_insert_fn(pages, client)
end_time = time.time()
logging.info(f'Exiting post-processing. Time up: {end_time - start_time}')
def mongo_insert_fn(objs, client):
db = client.pdfs
for obj in objs:
try:
result = db.propose_pages.update_one({'_id': obj['_id']},
{'$set':
{
'pp_detected_objs': obj['pp_detected_objs'],
'postprocess': True
}
}, upsert=False)
logging.info(f'Updated result: {result}')
        except pymongo.errors.WriteError as e:
            logging.error(f'Document write error: {e}\n Document id: {obj["_id"]}')
@click.command()
@click.argument("num_processes")
@click.argument("weights_pth")
@click.option('--skip/--no-skip')
def click_wrapper(num_processes, weights_pth, skip):
postprocess(mongo_insert_fn, int(num_processes), weights_pth, skip)
if __name__ == '__main__':
click_wrapper()
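
# Illustrative invocation (values are hypothetical); the MongoDB connection
# string is taken from the DBCONNECT environment variable read above:
#
#   DBCONNECT='mongodb://localhost:27017' python postprocess.py 8 weights.pth --skip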
| 2.375 | 2 |
model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py | calvinfeng/openvino | 0 | 6658 | <reponame>calvinfeng/openvino
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
'node_2': {'value': None, 'kind': 'data'},
'prior_box_1': {'type': 'PriorBox', 'kind': 'op'},
'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}
}
class TestMultiBoxPriorInfer(unittest.TestCase):
def test_prior_box_infer_ideal(self):
graph = build_graph(nodes_attributes,
[('node_1', 'prior_box_1'),
('node_2', 'prior_box_1'),
('prior_box_1', 'node_3')],
{'node_1': {'shape': np.array([1, 1024, 19, 19])},
'node_2': {'shape': np.array([1, 3, 300, 300])},
'prior_box_1': {'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333],
'min_size': [0.2, 0.272],
'max_size': '', 'offset': 0.5, 'step': 0.2, 'sizes': [0.2, 0.272]},
'node_3': {'shape': np.array([1, 2, 3])},
})
multi_box_prior_node = Node(graph, 'prior_box_1')
multi_box_prior_infer_mxnet(multi_box_prior_node)
exp_shape = np.array([1, 2, 8664])
res_shape = graph.node['node_3']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
self.assertEqual(multi_box_prior_node.min_size, [0.2, 0.272])
self.assertEqual(multi_box_prior_node.max_size, '')
self.assertEqual(multi_box_prior_node.aspect_ratio, [1.0, 2.0, 0.5, 3.0, 0.333333333333])
self.assertEqual(round(multi_box_prior_node.step, 1), 0.2)
self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5)
| 1.359375 | 1 |
bin/mem_monitor.py | Samahu/ros-system-monitor | 68 | 6659 | <reponame>Samahu/ros-system-monitor<filename>bin/mem_monitor.py
#!/usr/bin/env python
############################################################################
# Copyright (C) 2009, <NAME>, Inc. #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# Copyright (C) 2013 by <NAME> #
# <EMAIL> #
# #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# 3. The name of the copyright holders may be used to endorse or #
# promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
from __future__ import with_statement
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
mem_level_warn = 0.95
mem_level_error = 0.99
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
def update_status_stale(stat, last_update_time):
time_since_update = rospy.get_time() - last_update_time
stale_status = 'OK'
if time_since_update > 20 and time_since_update <= 35:
stale_status = 'Lagging'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.WARN)
if time_since_update > 35:
stale_status = 'Stale'
if stat.level == DiagnosticStatus.OK:
stat.message = stale_status
elif stat.message.find(stale_status) < 0:
stat.message = ', '.join([stat.message, stale_status])
stat.level = max(stat.level, DiagnosticStatus.ERROR)
stat.values.pop(0)
stat.values.pop(0)
stat.values.insert(0, KeyValue(key = 'Update Status', value = stale_status))
stat.values.insert(1, KeyValue(key = 'Time Since Update', value = str(time_since_update)))
class MemMonitor():
def __init__(self, hostname, diag_hostname):
self._diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size = 100)
self._mutex = threading.Lock()
self._mem_level_warn = rospy.get_param('~mem_level_warn', mem_level_warn)
self._mem_level_error = rospy.get_param('~mem_level_error', mem_level_error)
self._usage_timer = None
self._usage_stat = DiagnosticStatus()
self._usage_stat.name = 'Memory Usage (%s)' % diag_hostname
self._usage_stat.level = 1
self._usage_stat.hardware_id = hostname
self._usage_stat.message = 'No Data'
self._usage_stat.values = [ KeyValue(key = 'Update Status', value = 'No Data' ),
KeyValue(key = 'Time Since Last Update', value = 'N/A') ]
self._last_usage_time = 0
self._last_publish_time = 0
# Start checking everything
self.check_usage()
## Must have the lock to cancel everything
def cancel_timers(self):
if self._usage_timer:
self._usage_timer.cancel()
def check_memory(self):
values = []
level = DiagnosticStatus.OK
msg = ''
mem_dict = { 0: 'OK', 1: 'Low Memory', 2: 'Very Low Memory' }
try:
p = subprocess.Popen('free -tm',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
values.append(KeyValue(key = "\"free -tm\" Call Error", value = str(retcode)))
return DiagnosticStatus.ERROR, values
rows = stdout.split('\n')
data = rows[1].split()
total_mem_physical = data[1]
used_mem_physical = data[2]
free_mem_physical = data[3]
data = rows[2].split()
total_mem_swap = data[1]
used_mem_swap = data[2]
free_mem_swap = data[3]
data = rows[3].split()
total_mem = data[1]
used_mem = data[2]
free_mem = data[3]
level = DiagnosticStatus.OK
mem_usage = float(used_mem_physical)/float(total_mem_physical)
if (mem_usage < self._mem_level_warn):
level = DiagnosticStatus.OK
elif (mem_usage < self._mem_level_error):
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.ERROR
values.append(KeyValue(key = 'Memory Status', value = mem_dict[level]))
values.append(KeyValue(key = 'Total Memory (Physical)', value = total_mem_physical+"M"))
values.append(KeyValue(key = 'Used Memory (Physical)', value = used_mem_physical+"M"))
values.append(KeyValue(key = 'Free Memory (Physical)', value = free_mem_physical+"M"))
values.append(KeyValue(key = 'Total Memory (Swap)', value = total_mem_swap+"M"))
values.append(KeyValue(key = 'Used Memory (Swap)', value = used_mem_swap+"M"))
values.append(KeyValue(key = 'Free Memory (Swap)', value = free_mem_swap+"M"))
values.append(KeyValue(key = 'Total Memory', value = total_mem+"M"))
values.append(KeyValue(key = 'Used Memory', value = used_mem+"M"))
values.append(KeyValue(key = 'Free Memory', value = free_mem+"M"))
msg = mem_dict[level]
except Exception, e:
rospy.logerr(traceback.format_exc())
msg = 'Memory Usage Check Error'
values.append(KeyValue(key = msg, value = str(e)))
level = DiagnosticStatus.ERROR
return level, mem_dict[level], values
def check_usage(self):
if rospy.is_shutdown():
with self._mutex:
self.cancel_timers()
return
diag_level = 0
diag_vals = [ KeyValue(key = 'Update Status', value = 'OK' ),
KeyValue(key = 'Time Since Last Update', value = 0 )]
diag_msgs = []
# Check memory
mem_level, mem_msg, mem_vals = self.check_memory()
diag_vals.extend(mem_vals)
if mem_level > 0:
diag_msgs.append(mem_msg)
diag_level = max(diag_level, mem_level)
if diag_msgs and diag_level > 0:
usage_msg = ', '.join(set(diag_msgs))
else:
usage_msg = stat_dict[diag_level]
# Update status
with self._mutex:
self._last_usage_time = rospy.get_time()
self._usage_stat.level = diag_level
self._usage_stat.values = diag_vals
self._usage_stat.message = usage_msg
if not rospy.is_shutdown():
self._usage_timer = threading.Timer(5.0, self.check_usage)
self._usage_timer.start()
else:
self.cancel_timers()
def publish_stats(self):
with self._mutex:
# Update everything with last update times
update_status_stale(self._usage_stat, self._last_usage_time)
msg = DiagnosticArray()
msg.header.stamp = rospy.get_rostime()
msg.status.append(self._usage_stat)
if rospy.get_time() - self._last_publish_time > 0.5:
self._diag_pub.publish(msg)
self._last_publish_time = rospy.get_time()
if __name__ == '__main__':
hostname = socket.gethostname()
hostname = hostname.replace('-', '_')
import optparse
parser = optparse.OptionParser(usage="usage: mem_monitor.py [--diag-hostname=cX]")
parser.add_option("--diag-hostname", dest="diag_hostname",
help="Computer name in diagnostics output (ex: 'c1')",
metavar="DIAG_HOSTNAME",
action="store", default = hostname)
options, args = parser.parse_args(rospy.myargv())
try:
rospy.init_node('mem_monitor_%s' % hostname)
except rospy.exceptions.ROSInitException:
print >> sys.stderr, 'Memory monitor is unable to initialize node. Master may not be running.'
sys.exit(0)
mem_node = MemMonitor(hostname, options.diag_hostname)
rate = rospy.Rate(1.0)
try:
while not rospy.is_shutdown():
rate.sleep()
mem_node.publish_stats()
except KeyboardInterrupt:
pass
except Exception, e:
traceback.print_exc()
rospy.logerr(traceback.format_exc())
mem_node.cancel_timers()
sys.exit(0)
| 1.28125 | 1 |
cmake/utils/gen-ninja-deps.py | stamhe/bitcoin-abc | 1,266 | 6660 | <filename>cmake/utils/gen-ninja-deps.py
#!/usr/bin/env python3
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(description='Produce a dep file from ninja.')
parser.add_argument(
'--build-dir',
help='The build directory.',
required=True)
parser.add_argument(
'--base-dir',
help='The directory for which dependencies are rewriten.',
required=True)
parser.add_argument('--ninja', help='The ninja executable to use.')
parser.add_argument(
'base_target',
help="The target from the base's perspective.")
parser.add_argument(
'targets', nargs='+',
help='The target for which dependencies are extracted.')
parser.add_argument(
'--extra-deps', nargs='+',
help='Extra dependencies.')
args = parser.parse_args()
build_dir = os.path.abspath(args.build_dir)
base_dir = os.path.abspath(args.base_dir)
ninja = args.ninja
base_target = args.base_target
targets = args.targets
extra_deps = args.extra_deps
# Make sure we operate in the right folder.
os.chdir(build_dir)
if ninja is None:
ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1]
# Construct the set of all targets
all_targets = set()
doto_targets = set()
for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines():
t, r = t.split(b':')
all_targets.add(t)
if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__':
doto_targets.add(t)
def parse_ninja_query(query):
deps = dict()
lines = query.splitlines()
while len(lines):
line = lines.pop(0)
if line[0] == ord(' '):
continue
# We have a new target
target = line.split(b':')[0]
assert lines.pop(0)[:8] == b' input:'
inputs = set()
while True:
i = lines.pop(0)
if i[:4] != b' ':
break
            '''
            ninja has 3 types of input:
                1. Explicit dependencies, no prefix;
                2. Implicit dependencies, | prefix;
                3. Order-only dependencies, || prefix.
            Order-only dependencies do not require the target to be rebuilt,
            so we ignore them.
            '''
i = i[4:]
if i[0] == ord('|'):
if i[1] == ord('|'):
# We reached the order only dependencies.
break
i = i[2:]
inputs.add(i)
deps[target] = inputs
return deps
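
# Illustrative sketch: a tiny hand-made query listing run through
# parse_ninja_query, showing how explicit (no prefix), implicit ('| ') and
# order-only ('|| ') inputs are treated. The listing only mimics the shape this
# parser expects; real `ninja -t query` output may differ slightly.
_example_query = (b"foo.o:\n"
                  b"  input: CXX_COMPILER__foo\n"
                  b"    foo.cpp\n"
                  b"    | foo.h\n"
                  b"    || order_only_dep\n"
                  b"  outputs:\n"
                  b"    libfoo.a\n")
# parse_ninja_query(_example_query) == {b'foo.o': {b'foo.cpp', b'foo.h'}}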
def extract_deps(workset):
# Recursively extract the dependencies of the target.
deps = dict()
while len(workset) > 0:
query = subprocess.check_output([ninja, '-t', 'query'] + list(workset))
target_deps = parse_ninja_query(query)
deps.update(target_deps)
workset = set()
for d in target_deps.values():
workset.update(t for t in d if t in all_targets and t not in deps)
# Extract build time dependencies.
bt_targets = [t for t in deps if t in doto_targets]
if len(bt_targets) == 0:
return deps
ndeps = subprocess.check_output(
[ninja, '-t', 'deps'] + bt_targets,
stderr=subprocess.DEVNULL)
lines = ndeps.splitlines()
while len(lines) > 0:
line = lines.pop(0)
t, m = line.split(b':')
if m == b' deps not found':
continue
inputs = set()
while True:
i = lines.pop(0)
if i == b'':
break
assert i[:4] == b' '
inputs.add(i[4:])
deps[t] = inputs
return deps
base_dir = base_dir.encode()
def rebase_deps(deps):
rebased = dict()
cache = dict()
def rebase(path):
if path in cache:
return cache[path]
abspath = os.path.abspath(path)
newpath = path if path == abspath else os.path.relpath(
abspath, base_dir)
cache[path] = newpath
return newpath
for t, s in deps.items():
rebased[rebase(t)] = set(rebase(d) for d in s)
return rebased
deps = extract_deps(set(targets))
deps = rebase_deps(deps)
def dump(deps):
for t, d in deps.items():
if len(d) == 0:
continue
str = t.decode() + ": \\\n "
str += " \\\n ".join(sorted(map((lambda x: x.decode()), d)))
print(str)
# Collapse everything under the base target.
basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps)
for d in deps.values():
basedeps.update(d)
base_target = base_target.encode()
basedeps.discard(base_target)
dump({base_target: basedeps})
| 2.34375 | 2 |
src/webpy1/src/manage/checkPic.py | ptphp/PyLib | 1 | 6661 | <gh_stars>1-10
'''
Created on 2011-6-22
@author: dholer
'''
| 1.085938 | 1 |
tests/__init__.py | coleb/sendoff | 2 | 6662 | """Tests for the `sendoff` library."""
"""
The `sendoff` library tests validate the expected function of the library.
"""
| 0.894531 | 1 |
sysinv/sysinv/sysinv/sysinv/helm/garbd.py | Wind-River/starlingx-config | 0 | 6663 | #
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import base
class GarbdHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the galera arbitrator chart"""
# The service name is used to build the standard docker image location.
# It is intentionally "mariadb" and not "garbd" as they both use the
# same docker image.
SERVICE_NAME = common.HELM_CHART_MARIADB
CHART = common.HELM_CHART_GARBD
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
SUPPORTED_APP_NAMESPACES = {
constants.HELM_APP_OPENSTACK:
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
}
def _is_enabled(self, app_name, chart_name, namespace):
# First, see if this chart is enabled by the user then adjust based on
# system conditions
enabled = super(GarbdHelm, self)._is_enabled(
app_name, chart_name, namespace)
# If there are fewer than 2 controllers or we're on AIO-DX or we are on
# distributed cloud system controller, we'll use a single mariadb server
# and so we don't want to run garbd.
if enabled and (self._num_controllers() < 2 or
utils.is_aio_duplex_system(self.dbapi) or
(self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER)):
enabled = False
return enabled
def execute_manifest_updates(self, operator):
# On application load this chart is enabled in the mariadb chart group
if not self._is_enabled(operator.APP,
self.CHART, common.HELM_NS_OPENSTACK):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {
}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides
| 1.828125 | 2 |
dataloader/frame_counter/frame_counter.py | aaron-zou/pretraining-twostream | 0 | 6664 | #!/usr/bin/env python
"""Generate frame counts dict for a dataset.
Usage:
frame_counter.py [options]
Options:
-h, --help Print help message
--root=<str> Path to root of dataset (should contain video folders that contain images)
[default: /vision/vision_users/azou/data/hmdb51_flow/u/]
--output=<str> Output filename [default: hmdb_frame_count.pickle]
"""
from __future__ import print_function
from docopt import docopt
import os
import sys
import pickle
if __name__ == '__main__':
args = docopt(__doc__)
print(args)
# Final counts
counts = {}
    min_count = sys.maxsize
# Generate list of video folders
for root, dirs, files in os.walk(args['--root']):
# Skip the root directory
if len(dirs) != 0:
continue
# Process a directory and frame count into a dictionary entry
name = os.path.basename(os.path.normpath(root))
print('{}: {} frames'.format(name, len(files)))
counts[name] = len(files)
# Track minimum count
if len(files) < min_count:
min_count = len(files)
with open(args['--output'], 'wb') as ofile:
pickle.dump(counts, ofile)
print('Minimum frame count = {}'.format(min_count))
| 3.109375 | 3 |
Problem_30/main.py | jdalzatec/EulerProject | 1 | 6665 | total = 0
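# Project Euler problem 30: sum all numbers (the loop below scans 1000..999999)
# that equal the sum of the fifth powers of their digits.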
for n in range(1000, 1000000):
suma = 0
for i in str(n):
suma += int(i)**5
if (n == suma):
total += n
print(total) | 3.515625 | 4 |
armi/physics/fuelCycle/settings.py | celikten/armi | 1 | 6666 | <gh_stars>1-10
"""Settings for generic fuel cycle code."""
import re
import os
from armi.settings import setting
from armi.operators import settingsValidation
CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm"
CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary"
CONF_CIRCULAR_RING_MODE = "circularRingMode"
CONF_CIRCULAR_RING_ORDER = "circularRingOrder"
CONF_CUSTOM_FUEL_MANAGEMENT_INDEX = "customFuelManagementIndex"
CONF_RUN_LATTICE_BEFORE_SHUFFLING = "runLatticePhysicsBeforeShuffling"
CONF_SHUFFLE_LOGIC = "shuffleLogic"
CONF_PLOT_SHUFFLE_ARROWS = "plotShuffleArrows"
CONF_FUEL_HANDLER_NAME = "fuelHandlerName"
CONF_JUMP_RING_NUM = "jumpRingNum"
CONF_LEVELS_PER_CASCADE = "levelsPerCascade"
def getFuelCycleSettings():
"""Define settings for fuel cycle."""
settings = [
setting.Setting(
CONF_ASSEMBLY_ROTATION_ALG,
default="",
label="Assembly Rotation Algorithm",
description="The algorithm to use to rotate the detail assemblies while shuffling",
options=["", "buReducingAssemblyRotation", "simpleAssemblyRotation"],
enforcedOptions=True,
),
setting.Setting(
CONF_ASSEM_ROTATION_STATIONARY,
default=False,
label="Rotate stationary assems",
description=(
"Whether or not to rotate assemblies that are not shuffled."
"This can only be True if 'rotation' is true."
),
),
setting.Setting(
CONF_CIRCULAR_RING_MODE,
default=False,
description="Toggle between circular ring definitions to hexagonal ring definitions",
label="Use Circular Rings",
),
setting.Setting(
CONF_CIRCULAR_RING_ORDER,
default="angle",
description="Order by which locations are sorted in circular rings for equilibrium shuffling",
label="Eq. circular sort type",
options=["angle", "distance", "distanceSmart"],
),
setting.Setting(
CONF_CUSTOM_FUEL_MANAGEMENT_INDEX,
default=0,
description=(
"An index that determines which of various options is used in management. "
"Useful for optimization sweeps. "
),
label="Custom Shuffling Index",
),
setting.Setting(
CONF_RUN_LATTICE_BEFORE_SHUFFLING,
default=False,
description=(
"Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. "
"Note: This is recommended when performing equilibrium shuffling branching searches."
),
label="Generate XS Prior to Fuel Shuffling",
),
setting.Setting(
CONF_SHUFFLE_LOGIC,
default="",
label="Shuffle Logic",
description=(
"Python script written to handle the fuel shuffling for this case. "
"This is user-defined per run as a dynamic input."
),
# schema here could check if file exists, but this is a bit constraining in testing.
# For example, some tests have relative paths for this but aren't running in
# the right directory, and IsFile doesn't seem to work well with relative paths.
# This is left here as an FYI about how we could check existence of files if we get
# around these problem.
# schema=vol.All(
# vol.IsFile(), # pylint: disable=no-value-for-parameter
# msg="Shuffle logic input must be an existing file",
# ),
),
setting.Setting(
CONF_FUEL_HANDLER_NAME,
default="",
label="Fuel Handler Name",
description="The name of the FuelHandler class in the shuffle logic module to activate",
),
setting.Setting(
CONF_PLOT_SHUFFLE_ARROWS,
default=False,
description="Make plots with arrows showing each move.",
label="Plot shuffle arrows",
),
setting.Setting(
CONF_JUMP_RING_NUM, default=8, label="Jump Ring Number", description="None"
),
setting.Setting(
CONF_LEVELS_PER_CASCADE,
default=14,
label="Move per cascade",
description="None",
),
]
return settings
def getFuelCycleSettingValidators(inspector):
queries = []
queries.append(
settingsValidation.Query(
lambda: bool(inspector.cs["shuffleLogic"])
^ bool(inspector.cs["fuelHandlerName"]),
"A value was provided for `fuelHandlerName` or `shuffleLogic`, but not "
"the other. Either both `fuelHandlerName` and `shuffleLogic` should be "
"defined, or neither of them.",
"",
inspector.NO_ACTION,
)
)
    # Check whether user-supplied fuel shuffling code (maintained outside ARMI's version control) needs fixes
# These are basically auto-migrations for untracked code using
# the ARMI API. (This may make sense at a higher level)
regex_solutions = [
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[1-3]{1}\s*)\)",
r"\1runLog.important(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[4-5]{1,2}\s*)\)",
r"\1runLog.info(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*[6-8]{1,2}\s*)\)",
r"\1runLog.extra(\2)",
),
(
r"(#{0,20}?)[^\s#]*output\s*?\((.*?)(,\s*\d{1,2}\s*)\)",
r"\1runLog.debug(\2)",
),
(r"(#{0,20}?)[^\s#]*output\s*?\((.*?)\)", r"\1runLog.important(\2)"),
(r"output = self.cs.output", r""),
(r"cs\.getSetting\(\s*([^\)]+)\s*\)", r"cs[\1]"),
(r"cs\.setSetting\(\s*([^\)]+)\s*,\s*([^\)]+)\s*\)", r"cs[\1] = \2"),
(
r"import\s*armi\.components\s*as\s*components",
r"from armi.reactor import components",
),
(r"\[['\"]caseTitle['\"]\]", r".caseTitle"),
(
r"self.r.core.bolAssems\['(.*?)'\]",
r"self.r.blueprints.assemblies['\1']",
),
(r"copyAssembly", r"duplicate"),
]
def _locateRegexOccurences():
with open(inspector._csRelativePath(inspector.cs["shuffleLogic"])) as src:
src = src.read()
matches = []
for pattern, _sub in regex_solutions:
matches += re.findall(pattern, src)
return matches
def _applyRegexSolutions():
srcFile = inspector._csRelativePath(inspector.cs["shuffleLogic"])
destFile = os.path.splitext(srcFile)[0] + "migrated.py"
with open(srcFile) as src, open(destFile, "w") as dest:
srcContent = src.read() # get the buffer content
regexContent = srcContent # keep the before and after changes separate
for pattern, sub in regex_solutions:
regexContent = re.sub(pattern, sub, regexContent)
if regexContent != srcContent:
dest.write("from armi import runLog\n")
dest.write(regexContent)
inspector.cs["shuffleLogic"] = destFile
queries.append(
settingsValidation.Query(
lambda: " " in inspector.cs["shuffleLogic"],
"Spaces are not allowed in shuffleLogic file location. You have specified {0}. "
"Shuffling will not occur.".format(inspector.cs["shuffleLogic"]),
"",
inspector.NO_ACTION,
)
)
def _clearShufflingInput():
inspector._assignCS("shuffleLogic", "")
inspector._assignCS("fuelHandlerName", "")
queries.append(
settingsValidation.Query(
lambda: inspector.cs["shuffleLogic"]
and not inspector._csRelativePathExists(inspector.cs["shuffleLogic"]),
"The specified shuffle logic file '{0}' cannot be found. "
"Shuffling will not occur.".format(inspector.cs["shuffleLogic"]),
"Clear specified file value?",
_clearShufflingInput,
)
)
queries.append(
settingsValidation.Query(
lambda: inspector.cs["shuffleLogic"]
and inspector._csRelativePathExists(inspector.cs["shuffleLogic"])
and _locateRegexOccurences(),
"The shuffle logic file {} uses deprecated code."
" It will not work unless you permit some automated changes to occur."
" The logic file will be backed up to the current directory under a timestamped name"
"".format(inspector.cs["shuffleLogic"]),
"Proceed?",
_applyRegexSolutions,
)
)
return queries
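

def _exampleRegexMigration():
    """Illustrative sketch of one regex_solutions migration.

    Mirrors the re.sub calls in _applyRegexSolutions; the legacy line is
    hypothetical and only shows the cs.getSetting(...) -> cs[...] rewrite.
    """
    legacy = "logic = cs.getSetting('shuffleLogic')"
    migrated = re.sub(r"cs\.getSetting\(\s*([^\)]+)\s*\)", r"cs[\1]", legacy)
    return migrated  # "logic = cs['shuffleLogic']"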
| 2.296875 | 2 |
nl/predictor.py | jclosure/donkus | 1 | 6667 | <gh_stars>1-10
from nltk.corpus import gutenberg
from nltk import ConditionalFreqDist
from random import choice
#create the distribution object
cfd = ConditionalFreqDist()
## for each token count the current word given the previous word
prev_word = None
for word in gutenberg.words('austen-persuasion.txt'):
cfd[prev_word][word] += 1
prev_word = word
## start predicting at given word, say "therefore"
word = "therefore"
i = 1
## find all words that can follow the given word and choose one at random
while i<20:
print word,
lwords = cfd.get(word).keys()
follower = choice(lwords)
word = follower
i += 1
| 3.578125 | 4 |
eve/workers/pykmip/bin/run_server.py | mmg-3/cloudserver | 762 | 6668 | #!/usr/bin/env python
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging # noqa: E402
logging.basicConfig(level=logging.DEBUG)
from kmip.services.server import server # noqa: E402
if __name__ == '__main__':
print('Starting PyKMIP server on 0.0.0.0:5696')
server.main()
| 1.9375 | 2 |
tests/test_tempo_event.py | yokaze/crest-python | 0 | 6669 | #
# test_tempo_event.py
# crest-python
#
# Copyright (C) 2017 <NAME>
# Distributed under the MIT License.
#
import crest_loader
import unittest
from crest.events.meta import TempoEvent
class TestTempoEvent(unittest.TestCase):
def test_ctor(self):
TempoEvent()
TempoEvent(120)
def test_message(self):
evt = TempoEvent(120)
self.assertEqual(evt.Message, [0xFF, 0x51, 0x03, 0x07, 0xA1, 0x20])
def test_property(self):
evt = TempoEvent(120)
self.assertEqual(evt.Tempo, 120)
self.assertEqual(evt.MicroSeconds, 500000)
evt.Tempo = 60
self.assertEqual(evt.Tempo, 60)
self.assertEqual(evt.MicroSeconds, 1000000)
evt.MicroSeconds = 250000
self.assertEqual(evt.Tempo, 240)
self.assertEqual(evt.MicroSeconds, 250000)
if (__name__ == '__main__'):
unittest.main()
| 2.421875 | 2 |
tensorflow_quantum/python/differentiators/__init__.py | PyJedi/quantum | 1,501 | 6670 | <gh_stars>1000+
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module functions for tfq.differentiators.*"""
from tensorflow_quantum.python.differentiators.adjoint import (
Adjoint,)
from tensorflow_quantum.python.differentiators.linear_combination import (
ForwardDifference,
CentralDifference,
LinearCombination,
)
from tensorflow_quantum.python.differentiators.parameter_shift import (
ParameterShift,)
from tensorflow_quantum.python.differentiators.differentiator import (
Differentiator,)
| 1.96875 | 2 |
tests/test_color_background.py | erykoff/redmapper | 17 | 6671 | <reponame>erykoff/redmapper
import unittest
import numpy.testing as testing
import numpy as np
import fitsio
import tempfile
import os
from redmapper import ColorBackground
from redmapper import ColorBackgroundGenerator
from redmapper import Configuration
class ColorBackgroundTestCase(unittest.TestCase):
"""
Tests for the redmapper.ColorBackground and
redmapper.ColorBackgroundGenerator classes.
"""
def runTest(self):
"""
Run the ColorBackground and ColorBackgroundGenerator tests.
"""
file_name = 'test_dr8_col_bkg.fit'
file_path = 'data_for_tests'
cbkg = ColorBackground('%s/%s' % (file_path, file_name))
col1 = np.array([0.572300, 1.39560])
col2 = np.array([0.7894, 0.9564])
refmags = np.array([17.587, 18.956])
refmagindex = np.array([258, 395])
col1index = np.array([1, 17])
col2index = np.array([15, 19])
# These are new values that are based on improvements in the binning.
idl_bkg1 = np.array([0.76778, 0.80049])
idl_bkg2 = np.array([0.04012, 0.10077])
idl_bkg12 = np.array([0.01085, 0.081])
# Test color1
py_outputs = cbkg.lookup_diagonal(1, col1, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg1, decimal=5)
# Test color2
py_outputs = cbkg.lookup_diagonal(2, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg2, decimal=5)
# Test off-diagonal
py_outputs = cbkg.lookup_offdiag(1, 2, col1, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_bkg12, decimal=5)
# And a test sigma_g with the usehdrarea=True
cbkg2 = ColorBackground('%s/%s' % (file_path, file_name), usehdrarea=True)
col1 = np.array([0.572300, 1.39560, 1.0])
col2 = np.array([0.7894, 0.9564, 1.0])
refmags = np.array([17.587, 18.956, 25.0])
idl_sigma_g1 = np.array([127.698, 591.112, np.inf])
idl_sigma_g2 = np.array([7.569, 82.8938, np.inf])
# Test color1
py_outputs = cbkg2.sigma_g_diagonal(1, col1, refmags)
testing.assert_almost_equal(py_outputs, idl_sigma_g1, decimal=3)
# Test color2
py_outputs = cbkg2.sigma_g_diagonal(2, col2, refmags)
testing.assert_almost_equal(py_outputs, idl_sigma_g2, decimal=3)
#####################################################
# Now a test of the generation of a color background
conf_filename = 'testconfig.yaml'
config = Configuration(file_path + "/" + conf_filename)
tfile = tempfile.mkstemp()
os.close(tfile[0])
config.bkgfile_color = tfile[1]
config.d.nside = 128
config.d.hpix = [8421]
config.border = 0.0
cbg = ColorBackgroundGenerator(config, minrangecheck=5)
# Need to set clobber=True because the tempfile was created
cbg.run(clobber=True)
fits = fitsio.FITS(config.bkgfile_color)
# Make sure we have 11 extensions
testing.assert_equal(len(fits), 11)
# These tests are obsolete, but could be refactored
# Check the 01_01 and 01_02
# bkg11 = fits['01_01_REF'].read()
# bkg11_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_01_REF')
# testing.assert_almost_equal(bkg11['BC'], bkg11_compare['BC'], 3)
# testing.assert_almost_equal(bkg11['N'], bkg11_compare['N'], 3)
# bkg12 = fits['01_02_REF'].read()
# bkg12_compare = fitsio.read(file_path + "/test_dr8_bkg_zredc_sub.fits", ext='01_02_REF')
# testing.assert_almost_equal(bkg12['BC'], bkg12_compare['BC'], 2)
# testing.assert_almost_equal(bkg12['N'], bkg12_compare['N'], 4)
# And delete the tempfile
os.remove(config.bkgfile_color)
if __name__=='__main__':
unittest.main()
| 2.5625 | 3 |
src/metpy/calc/basic.py | Exi666/MetPy | 0 | 6672 | # Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of basic calculations.
These include:
* wind components
* heat index
* windchill
"""
import warnings
import numpy as np
from scipy.ndimage import gaussian_filter
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, masked_array, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
# The following variables are constants for a standard atmosphere
t0 = 288. * units.kelvin
p0 = 1013.25 * units.hPa
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_speed(u, v):
r"""Compute the wind speed from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
Returns
-------
wind speed: `pint.Quantity`
The speed of the wind
See Also
--------
wind_components
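
    Examples
    --------
    A minimal illustration (the printed `Quantity` repr assumes the same pint
    formatting as the other examples in this module):

    >>> from metpy.units import units
    >>> metpy.calc.wind_speed(3. * units('m/s'), 4. * units('m/s'))
    <Quantity(5.0, 'meter / second')>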
"""
speed = np.sqrt(u * u + v * v)
return speed
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_direction(u, v, convention='from'):
r"""Compute the wind direction from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
convention : str
Convention to return direction. 'from' returns the direction the wind is coming from
(meteorological convention). 'to' returns the direction the wind is going towards
(oceanographic convention). Default is 'from'.
Returns
-------
direction: `pint.Quantity`
The direction of the wind in interval [0, 360] degrees, with 360 being North, with the
direction defined by the convention kwarg.
See Also
--------
wind_components
Notes
-----
In the case of calm winds (where `u` and `v` are zero), this function returns a direction
of 0.
"""
wdir = 90. * units.deg - np.arctan2(-v, -u)
origshape = wdir.shape
wdir = atleast_1d(wdir)
    # Handle oceanographic convention
if convention == 'to':
wdir -= 180 * units.deg
elif convention not in ('to', 'from'):
raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".')
wdir[wdir <= 0] += 360. * units.deg
# avoid unintended modification of `pint.Quantity` by direct use of magnitude
calm_mask = (np.asarray(u.magnitude) == 0.) & (np.asarray(v.magnitude) == 0.)
# np.any check required for legacy numpy which treats 0-d False boolean index as zero
if np.any(calm_mask):
wdir[calm_mask] = 0. * units.deg
return wdir.reshape(origshape).to('degrees')
@exporter.export
@preprocess_xarray
@check_units('[speed]')
def wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : `pint.Quantity`
The wind speed (magnitude)
wdir : `pint.Quantity`
The wind direction, specified as the direction from which the wind is
blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North.
Returns
-------
u, v : tuple of `pint.Quantity`
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
See Also
--------
wind_speed
wind_direction
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.wind_components(10. * units('m/s'), 225. * units.deg)
(<Quantity(7.071067811865475, 'meter / second')>,
<Quantity(7.071067811865477, 'meter / second')>)
"""
wdir = _check_radians(wdir, max_radians=4 * np.pi)
u = -speed * np.sin(wdir)
v = -speed * np.cos(wdir)
return u, v
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def heat_index(temperature, rh, mask_undefined=True):
r"""Calculate the Heat Index from the current temperature and relative humidity.
The implementation uses the formula outlined in [Rothfusz1990]_, which is a
multi-variable least-squares regression of the values obtained in [Steadman1979]_.
Additional conditional corrections are applied to match what the National
Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a
depiction of this algorithm and further discussion.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
The corresponding Heat Index value(s)
Other Parameters
----------------
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values masked where the temperature < 80F. Defaults to `True`.
See Also
--------
windchill
"""
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
# assign units to rh if they currently are not present
if not hasattr(rh, 'units'):
rh = rh * units.dimensionless
delta = temperature.to(units.degF) - 0. * units.degF
rh2 = rh * rh
delta2 = delta * delta
    # Simplified Heat Index -- constants converted for RH in [0, 1]
a = -10.3 * units.degF + 1.1 * delta + 4.7 * units.delta_degF * rh
# More refined Heat Index -- constants converted for RH in [0, 1]
b = (-42.379 * units.degF
+ 2.04901523 * delta
+ 1014.333127 * units.delta_degF * rh
- 22.475541 * delta * rh
- 6.83783e-3 / units.delta_degF * delta2
- 5.481717e2 * units.delta_degF * rh2
+ 1.22874e-1 / units.delta_degF * delta2 * rh
+ 8.5282 * delta * rh2
- 1.99e-2 / units.delta_degF * delta2 * rh2)
# Create return heat index
hi = np.full(np.shape(temperature), np.nan) * units.degF
# Retain masked status of temperature with resulting heat index
if hasattr(temperature, 'mask'):
hi = masked_array(hi)
# If T <= 40F, Heat Index is T
sel = (temperature <= 40. * units.degF)
if np.any(sel):
hi[sel] = temperature[sel].to(units.degF)
# If a < 79F and hi is unset, Heat Index is a
sel = (a < 79. * units.degF) & np.isnan(hi)
if np.any(sel):
hi[sel] = a[sel]
# Use b now for anywhere hi has yet to be set
sel = np.isnan(hi)
if np.any(sel):
hi[sel] = b[sel]
# Adjustment for RH <= 13% and 80F <= T <= 112F
sel = ((rh <= 13. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 112. * units.degF))
if np.any(sel):
rh15adj = ((13. - rh * 100.) / 4.
* ((17. * units.delta_degF - np.abs(delta - 95. * units.delta_degF))
/ 17. * units.delta_degF) ** 0.5)
hi[sel] = hi[sel] - rh15adj[sel]
# Adjustment for RH > 85% and 80F <= T <= 87F
sel = ((rh > 85. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 87. * units.degF))
if np.any(sel):
rh85adj = 0.02 * (rh * 100. - 85.) * (87. * units.delta_degF - delta)
hi[sel] = hi[sel] + rh85adj[sel]
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array(temperature < 80. * units.degF)
if mask.any():
hi = masked_array(hi, mask=mask)
return hi
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def apparent_temperature(temperature, rh, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the current apparent temperature.
Calculates the current apparent temperature based on the wind chill or heat index
as appropriate for the current conditions. Follows [NWS10201]_.
Parameters
----------
temperature : `pint.Quantity`
The air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill or heat_index is undefined masked. For wind
chill, these are values where the temperature > 50F or
wind speed <= 3 miles per hour. For heat index, these are values
where the temperature < 80F.
Defaults to `True`.
Returns
-------
`pint.Quantity`
The corresponding apparent temperature value(s)
See Also
--------
heat_index, windchill
"""
is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
speed = atleast_1d(speed)
# NB: mask_defined=True is needed to know where computed values exist
wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
mask_undefined=True).to(temperature.units)
heat_index_temperature = heat_index(temperature, rh,
mask_undefined=True).to(temperature.units)
# Combine the heat index and wind chill arrays (no point has a value in both)
# NB: older numpy.ma.where does not return a masked array
app_temperature = masked_array(
np.ma.where(masked_array(wind_chill_temperature).mask,
heat_index_temperature.to(temperature.units),
wind_chill_temperature.to(temperature.units)
), temperature.units)
# If mask_undefined is False, then set any masked values to the temperature
if not mask_undefined:
app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
# If no values are masked and provided temperature does not have a mask
# we should return a non-masked array
if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'):
app_temperature = np.array(app_temperature.m) * temperature.units
if is_not_scalar:
return app_temperature
else:
return atleast_1d(app_temperature)[0]
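# Illustrative usage sketch (hedged): apparent_temperature() above picks wind chill for
# the cold, windy point and heat index for the hot, humid point. Inputs are made up and
# no exact output values are asserted.
def _example_apparent_temperature():
    temperature = np.array([35., 95.]) * units.degF
    rh = np.array([70., 60.]) * units.percent
    wind = np.array([20., 5.]) * units.mph
    return apparent_temperature(temperature, rh, wind)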
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def pressure_to_height_std(pressure):
r"""Convert pressure data to heights using the U.S. standard atmosphere [NOAA1976]_.
The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Notes
-----
.. math:: Z = \frac{T_0}{\Gamma}[1-\frac{p}{p_0}^\frac{R\Gamma}{g}]
"""
gamma = 6.5 * units('K/km')
return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**(
mpconsts.Rd * gamma / mpconsts.g))
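# Illustrative usage sketch (hedged): standard-atmosphere heights for a few common
# pressure levels via pressure_to_height_std() above. No exact values are asserted.
def _example_pressure_to_height_std():
    pressure = np.array([1000., 850., 500.]) * units.hPa
    return pressure_to_height_std(pressure)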
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_geopotential(height):
r"""Compute geopotential for a given height.
Calculates the geopotential from height using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: \Phi = G m_e \left( \frac{1}{R_e} - \frac{1}{R_e + z}\right)
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
height : `pint.Quantity`
Height above sea level
Returns
-------
`pint.Quantity`
The corresponding geopotential value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
return (mpconsts.G * mpconsts.me / mpconsts.Re) * (height / (mpconsts.Re + height))
@exporter.export
@preprocess_xarray
def geopotential_to_height(geopot):
r"""Compute height from a given geopotential.
Calculates the height from geopotential using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: z = \frac{1}{\frac{1}{R_e} - \frac{\Phi}{G m_e}} - R_e
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
    geopot : `pint.Quantity`
Geopotential
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
scaled = geopot * mpconsts.Re
return scaled * mpconsts.Re / (mpconsts.G * mpconsts.me - scaled)
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_pressure_std(height):
r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
The corresponding pressure value(s)
Notes
-----
.. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
"""
gamma = 6.5 * units('K/km')
return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma))
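# Illustrative usage sketch (hedged): height_to_pressure_std() inverts
# pressure_to_height_std() above, so a round trip should approximately recover the
# original pressures (up to floating-point error).
def _example_height_pressure_round_trip():
    pressure = np.array([1000., 700., 300.]) * units.hPa
    return height_to_pressure_std(pressure_to_height_std(pressure))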
@exporter.export
@preprocess_xarray
def coriolis_parameter(latitude):
r"""Calculate the coriolis parameter at each point.
The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.
Parameters
----------
latitude : array_like
Latitude at each point
Returns
-------
`pint.Quantity`
The corresponding coriolis force at each point
"""
latitude = _check_radians(latitude, max_radians=np.pi / 2)
return (2. * mpconsts.omega * np.sin(latitude)).to('1/s')
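# Illustrative usage sketch (hedged): Coriolis parameter at a few latitudes; passing
# degrees works because the values are converted to radians inside _check_radians().
def _example_coriolis_parameter():
    return coriolis_parameter(np.array([0., 30., 60.]) * units.degrees)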
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def add_height_to_pressure(pressure, height):
r"""Calculate the pressure at a certain height above another pressure level.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure level
height : `pint.Quantity`
Height above a pressure level
Returns
-------
`pint.Quantity`
The corresponding pressure value for the height above the pressure level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_pressure_to_height
"""
pressure_level_height = pressure_to_height_std(pressure)
return height_to_pressure_std(pressure_level_height + height)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[pressure]')
def add_pressure_to_height(height, pressure):
r"""Calculate the height at a certain pressure above another height.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
height : `pint.Quantity`
Height level
pressure : `pint.Quantity`
Pressure above height level
Returns
-------
`pint.Quantity`
The corresponding height value for the pressure above the height level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_height_to_pressure
"""
pressure_at_height = height_to_pressure_std(height)
return pressure_to_height_std(pressure_at_height - pressure)
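# Illustrative usage sketch (hedged): the two helpers above chain the standard
# atmosphere conversions, e.g. the pressure 1 km above the 850-hPa level and the
# height gained by moving 50 hPa up from 1 km. Inputs are made up.
def _example_standard_atmosphere_offsets():
    p = add_height_to_pressure(850. * units.hPa, 1. * units.km)
    z = add_pressure_to_height(1. * units.km, 50. * units.hPa)
    return p, z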
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]', '[pressure]', '[pressure]')
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop
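# Illustrative usage sketch (hedged): evenly spaced sigma levels mapped to pressures
# for a 1000-hPa surface and a 100-hPa model top using sigma_to_pressure() above.
def _example_sigma_to_pressure():
    sigma = np.linspace(0., 1., 11) * units.dimensionless
    return sigma_to_pressure(sigma, 1000. * units.hPa, 100. * units.hPa)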
@exporter.export
@preprocess_xarray
def smooth_gaussian(scalar_grid, n):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : `pint.Quantity`
Some n-dimensional scalar grid. If more than two axes, smoothing
is only done across the last two.
n : int
Degree of filtering
Returns
-------
`pint.Quantity`
The filtered 2D scalar grid
Notes
-----
This function is a close replication of the GEMPAK function GWFS,
but is not identical. The following notes are incorporated from
the GEMPAK source code:
This function smoothes a scalar grid using a moving average
low-pass filter whose weights are determined by the normal
(Gaussian) probability distribution function for two dimensions.
The weight given to any grid point within the area covered by the
moving average for a target grid point is proportional to
EXP [ -( D ** 2 ) ],
where D is the distance from that point to the target point divided
by the standard deviation of the normal distribution. The value of
the standard deviation is determined by the degree of filtering
requested. The degree of filtering is specified by an integer.
This integer is the number of grid increments from crest to crest
of the wave for which the theoretical response is 1/e = .3679. If
the grid increment is called delta_x, and the value of this integer
is represented by N, then the theoretical filter response function
value for the N * delta_x wave will be 1/e. The actual response
function will be greater than the theoretical value.
The larger N is, the more severe the filtering will be, because the
response function for all wavelengths shorter than N * delta_x
will be less than 1/e. Furthermore, as N is increased, the slope
of the filter response function becomes more shallow; so, the
response at all wavelengths decreases, but the amount of decrease
lessens with increasing wavelength. (The theoretical response
function can be obtained easily--it is the Fourier transform of the
weight function described above.)
The area of the patch covered by the moving average varies with N.
As N gets bigger, the smoothing gets stronger, and weight values
farther from the target grid point are larger because the standard
deviation of the normal distribution is bigger. Thus, increasing
N has the effect of expanding the moving average window as well as
changing the values of weights. The patch is a square covering all
points whose weight values are within two standard deviations of the
mean of the two dimensional normal distribution.
The key difference between GEMPAK's GWFS and this function is that,
in GEMPAK, the leftover weight values representing the fringe of the
distribution are applied to the target grid point. In this
function, the leftover weights are not used.
When this function is invoked, the first argument is the grid to be
smoothed, the second is the value of N as described above:
GWFS ( S, N )
where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4,
then the 4 delta x wave length is passed with approximate response
1/e.
"""
# Compute standard deviation in a manner consistent with GEMPAK
n = int(round(n))
if n < 2:
n = 2
sgma = n / (2 * np.pi)
# Construct sigma sequence so smoothing occurs only in horizontal direction
nax = len(scalar_grid.shape)
# Assume the last two axes represent the horizontal directions
sgma_seq = [sgma if i > nax - 3 else 0 for i in range(nax)]
# Compute smoothed field and reattach units
res = gaussian_filter(scalar_grid, sgma_seq, truncate=2 * np.sqrt(2))
if hasattr(scalar_grid, 'units'):
res = res * scalar_grid.units
return res
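# Illustrative usage sketch (hedged): GEMPAK-style Gaussian smoothing of a random 2D
# field with n=4, i.e. the 4-delta-x wave is damped to roughly 1/e as described above.
def _example_smooth_gaussian():
    field = np.random.rand(20, 20)
    return smooth_gaussian(field, 4)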
@exporter.export
@preprocess_xarray
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
+ q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
+ smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                   + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
                                          + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid
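# Illustrative usage sketch (hedged): two passes of the 9-point smoother on a random
# field; only interior points are modified, the border keeps its original values.
def _example_smooth_n_point():
    field = np.random.rand(10, 10)
    return smooth_n_point(field, n=9, passes=2)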
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
r"""Convert the altimeter measurement to station pressure.
    This function is useful for working with METARs since they provide
    altimeter values, but not sea-level pressure or station pressure.
    The following definitions of altimeter setting and station pressure
    are taken from [Smithsonian1951]_. Altimeter setting is the
pressure value to which an aircraft altimeter scale is set so that it will
indicate the altitude above mean sea-level of an aircraft on the ground at the
location for which the value is determined. It assumes a standard atmosphere [NOAA1976]_.
Station pressure is the atmospheric pressure at the designated station elevation.
Finding the station pressure can be helpful for calculating sea-level pressure
or other parameters.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
which can be measured in either inches of mercury (in. Hg) or millibars (mb)
height: `pint.Quantity`
Elevation of the station measuring pressure.
Returns
-------
`pint.Quantity`
The station pressure in hPa or in. Hg, which can be used to calculate sea-level
pressure
See Also
--------
altimeter_to_sea_level_pressure
Notes
-----
This function is implemented using the following equations from the
Smithsonian Handbook (1951) p. 269
Equation 1:
.. math:: A_{mb} = (p_{mb} - 0.3)F
Equation 3:
.. math:: F = \left [1 + \left(\frac{p_{0}^n a}{T_{0}} \right)
\frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}
Where
:math:`p_{0}` = standard sea-level pressure = 1013.25 mb
:math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`
gamma = lapse rate in [NOAA1976]_ standard atmosphere below the isothermal layer
:math:`6.5^{\circ}C. km.^{-1}`
:math:`t_{0}` = standard sea-level temperature 288 K
:math:`H_{b} =` station elevation in meters (elevation for which station
pressure is given)
:math:`n = \frac{a R_{d}}{g} = 0.190284` where :math:`R_{d}` is the gas
constant for dry air
And solving for :math:`p_{mb}` results in the equation below, which is used to
calculate station pressure :math:`(p_{mb})`
.. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0} a H_{b}}{T_0}
\right) \right] ^ \frac{1}{n} + 0.3
"""
# Gamma Value for this case
gamma = 0.0065 * units('K/m')
# N-Value
n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()
return ((altimeter_value ** n
- ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
+ 0.3 * units.hPa)
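# Illustrative usage sketch (hedged): station pressure from a 29.92 inHg altimeter
# setting at a station 1000 m above sea level. No exact output value is asserted.
def _example_altimeter_to_station_pressure():
    return altimeter_to_station_pressure(29.92 * units.inHg, 1000. * units.m)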
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
r"""Convert the altimeter setting to sea-level pressure.
This function is useful for working with METARs since most provide
altimeter values, but not sea-level pressure, which is often plotted
on surface maps. The following definitions of altimeter setting, station pressure, and
sea-level pressure are taken from [Smithsonian1951]_
Altimeter setting is the pressure value to which an aircraft altimeter scale
is set so that it will indicate the altitude above mean sea-level of an aircraft
on the ground at the location for which the value is determined. It assumes a standard
atmosphere. Station pressure is the atmospheric pressure at the designated station
elevation. Sea-level pressure is a pressure value obtained by the theoretical reduction
of barometric pressure to sea level. It is assumed that atmosphere extends to sea level
below the station and that the properties of the atmosphere are related to conditions
observed at the station. This value is recorded by some surface observation stations,
but not all. If the value is recorded, it can be found in the remarks section. Finding
the sea-level pressure is helpful for plotting purposes and different calculations.
Parameters
----------
    altimeter_value : `pint.Quantity`
        The altimeter setting value as defined by the METAR or other observation,
        with units of inches of mercury (in Hg) or millibars (hPa)
    height : `pint.Quantity`
        Elevation of the station measuring pressure. Often measured in meters
    temperature : `pint.Quantity`
        Temperature at the station
    Returns
    -------
    `pint.Quantity`
        The sea-level pressure in hPa, which makes pressure values easier to compare
        between different stations
See Also
--------
altimeter_to_station_pressure
Notes
-----
This function is implemented using the following equations from Wallace and Hobbs (1977)
Equation 2.29:
.. math::
\Delta z = Z_{2} - Z_{1}
= \frac{R_{d} \bar T_{v}}{g_0}ln\left(\frac{p_{1}}{p_{2}}\right)
= \bar H ln \left (\frac {p_{1}}{p_{2}} \right)
Equation 2.31:
.. math::
p_{0} = p_{g}exp \left(\frac{Z_{g}}{\bar H} \right) \\
= p_{g}exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)
    Then by substituting :math:`\Delta z` for :math:`Z_{g}` in Equation 2.31:
    .. math:: p_{sea_level} = p_{station} exp\left(\frac{\Delta z}{H}\right)
    where :math:`\Delta z` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`
"""
# Calculate the station pressure using function altimeter_to_station_pressure()
psfc = altimeter_to_station_pressure(altimeter_value, height)
# Calculate the scale height
h = mpconsts.Rd * temperature / mpconsts.g
return psfc * np.exp(height / h)
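# Illustrative usage sketch (hedged): reducing an altimeter setting to sea-level
# pressure given station elevation and temperature, as typically needed when plotting
# METAR observations. Inputs are made up; temperature is given in kelvin to keep the
# scale-height computation free of offset-unit issues.
def _example_altimeter_to_sea_level_pressure():
    return altimeter_to_sea_level_pressure(29.92 * units.inHg, 1000. * units.m,
                                           288. * units.kelvin)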
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
The input value to check.
max_radians : float
Maximum absolute value of radians before warning.
Returns
-------
`pint.Quantity`
The input value
"""
try:
value = value.to('radians').m
except AttributeError:
pass
if np.greater(np.nanmax(np.abs(value)), max_radians):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(max_radians))
return value
| 3.234375 | 3 |
burl/core/api/views.py | wryfi/burl | 1 | 6673 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework_simplejwt.tokens import RefreshToken
@api_view(['GET'])
def root(request, fmt=None):
return Response({
'v1': reverse('api_v1:root', request=request, format=fmt),
})
@api_view(['GET'])
def v1_root(request, fmt=None):
root_navigation = {
'redirects': reverse('api_v1:redirects:redirect-list', request=request, format=fmt),
'token': reverse('api_v1:token_root', request=request, format=fmt)
}
return Response(root_navigation)
@api_view(['GET'])
def token_root(request, fmt=None):
token_navigation = {
'auth': reverse('api_v1:token_auth', request=request, format=fmt),
'refresh': reverse('api_v1:token_refresh', request=request, format=fmt),
'verify': reverse('api_v1:token_verify', request=request, format=fmt),
}
return Response(token_navigation)
@api_view(['POST'])
def token_refresh(request):
token = request.COOKIES.get("burl_refresh_token")
if token:
refresh = RefreshToken(str(token))
access = str(refresh.access_token)
if access:
return Response({"access": access}, 200)
else:
return Response({"unauthorized"}, 401)
return Response("unauthorized", 401)
@api_view(['POST'])
def token_refresh_revoke(_request):
response = Response("ok")
response.delete_cookie("burl_refresh_token")
return response
| 2.375 | 2 |
ITmeetups_back/api/serializers.py | RomulusGwelt/AngularProject | 3 | 6674 | from rest_framework import serializers
from .models import Post, Comment, Like
from django.contrib.auth.models import User
class CurrentUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
class PostSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer()
class Meta:
model = Post
fields = ('id', 'title', 'text', 'user', 'created_at')
class PostSerializer2(serializers.ModelSerializer):
user = CurrentUserSerializer
class Meta:
model = Post
fields = ('id', 'title', 'text', 'user', 'created_at')
class CommentSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer()
post = PostSerializer()
class Meta:
model = Comment
fields = ('id', 'text', 'user', 'post', 'created_at')
class CommentSerializer2(serializers.ModelSerializer):
user = CurrentUserSerializer
post = PostSerializer
class Meta:
model = Comment
fields = ('id', 'text', 'user', 'post', 'created_at')
class LikeSerializer(serializers.ModelSerializer):
user = CurrentUserSerializer
post = PostSerializer
class Meta:
model = Like
fields = ('id', 'user', 'post', 'created_at')
| 2.34375 | 2 |
qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | 6 | 6675 | <reponame>qurator-spk/sbb_ned
from ..embeddings.base import Embeddings
from flair.data import Sentence
class BertEmbeddings(Embeddings):
def __init__(self, model_path,
layers="-1, -2, -3, -4", pooling_operation='first', use_scalar_mix=True, no_cuda=False, *args, **kwargs):
super(BertEmbeddings, self).__init__(*args, **kwargs)
self._path = model_path
self._embeddings = None
self._layers = layers
self._pooling_operation = pooling_operation
self._use_scalar_mix = use_scalar_mix
self._no_cuda = no_cuda
def get(self, keys):
if self._embeddings is None:
if self._no_cuda:
import flair
import torch
flair.device = torch.device('cpu')
from .flair_bert import BertEmbeddings
self._embeddings = BertEmbeddings(bert_model_or_path=self._path,
layers=self._layers,
pooling_operation=self._pooling_operation,
use_scalar_mix=self._use_scalar_mix)
sentences = [Sentence(key) for key in keys]
# noinspection PyUnresolvedReferences
self._embeddings.embed(sentences)
for s_idx, sentence in enumerate(sentences):
for t_idx, token in enumerate(sentence):
emb = token.embedding.cpu().numpy()
yield token.text, emb
del token
del sentence
def config(self):
return {'description': self.description()}
def description(self):
layer_str = self._layers
layer_str = layer_str.replace(' ', '')
layer_str = layer_str.replace(',', '_')
return "bert-layers_{}-pooling_{}-scalarmix_{}".format(layer_str, self._pooling_operation, self._use_scalar_mix)
| 2.015625 | 2 |
Arbitrage_Future/Arbitrage_Future/test.py | ronaldzgithub/CryptoArbitrage | 1 | 6676 | <reponame>ronaldzgithub/CryptoArbitrage<filename>Arbitrage_Future/Arbitrage_Future/test.py
# !/usr/local/bin/python
# -*- coding:utf-8 -*-
import YunBi
import CNBTC
import json
import threading
import Queue
import time
import logging
import numpy
import message
import random
open_platform = [True,True,True,True]
numpy.set_printoptions(suppress=True)
# logging.basicConfig(level=logging.DEBUG,
# format="[%(asctime)20s] [%(levelname)8s] %(filename)10s:%(lineno)-5s --- %(message)s",
# datefmt="%Y-%m-%d %H:%M:%S",
# filename="log/%s.log"%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
# filemode='w')
# console = logging.StreamHandler()
# console.setLevel(logging.INFO)
# formatter = logging.Formatter("[%(asctime)20s] [%(levelname)8s] %(filename)10s:%(lineno)-5s --- %(message)s", "%Y-%m-%d %H:%M:%S")
# console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
coin_status = [-1,-1,-1,-1]
money_status = [-1,-1,-1,-1]
history = open("log/historyPrice_%s.txt"%time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time())),"a")
# output = open("journalist.txt",'a')
balance = open("log/balance%s.txt"%time.strftime('%Y_%m_%d %H_%M_%S', time.localtime(time.time())),'a')
ybQue1 = Queue.Queue()
ybQue2 = Queue.Queue()
hbQue1 = Queue.Queue()
hbQue2 = Queue.Queue()
okcQue1 = Queue.Queue()
okcQue2 = Queue.Queue()
cnbtcQue1 = Queue.Queue()
cnbtcQue2 = Queue.Queue()
ybTradeQue1 = Queue.Queue()
ybTradeQue2 = Queue.Queue()
cnbtcTradeQue1 = Queue.Queue()
cnbtcTradeQue2 = Queue.Queue()
hbTradeQue1 = Queue.Queue()
hbTradeQue2 = Queue.Queue()
okcTradeQue1 = Queue.Queue()
okcTradeQue2 = Queue.Queue()
ybAccountQue1 = Queue.Queue()
ybAccountQue2 = Queue.Queue()
cnbtcAccountQue1 = Queue.Queue()
cnbtcAccountQue2 = Queue.Queue()
hbAccountQue1 = Queue.Queue()
hbAccountQue2 = Queue.Queue()
okcAccountQue1 = Queue.Queue()
okcAccountQue2 = Queue.Queue()
alertQue = Queue.Queue()
total_trade_coin = 0
delay_time = 0.2
config = json.load(open("config.json","r"))
#####max coin # in each trade
maxTradeLimitation = float(config["MaxCoinTradeLimitation"])
tel_list = config["tel"]
# maxTradeLimitation_yb_buy_cnbtc_sell = float(config["MaxCoinTradeLimitation_yb_buy_cnbtc_sell"])
# maxTradeLimitation_yb_buy_hb_sell = float(config["MaxCoinTradeLimitation_yb_buy_hb_sell"])
# maxTradeLimitation_yb_sell_hb_buy = float(config["MaxCoinTradeLimitation_yb_sell_hb_buy"])
# maxTradeLimitation_hb_buy_cnbtc_sell = float(config["MaxCoinTradeLimitation_hb_buy_cnbtc_sell"])
# maxTradeLimitation_hb_sell_cnbtc_buy = float(config["MaxCoinTradeLimitation_hb_sell_cnbtc_buy"])
#####max coin # for each account
maxCoin = float(config["MaxCoinLimitation"])
#####if spread over this threshold, we trade
max_thres_limitation = float(config["max_thres_limitation"])
spread_threshold_yb_sell_cnbtc_buy = float(config["spread_threshold_yb_sell_cnbtc_buy"])
spread_threshold_yb_buy_cnbtc_sell = float(config["spread_threshold_yb_buy_cnbtc_sell"])
spread_threshold_yb_buy_hb_sell = float(config["spread_threshold_yb_buy_hb_sell"])
spread_threshold_yb_sell_hb_buy = float(config["spread_threshold_yb_sell_hb_buy"])
spread_threshold_hb_buy_cnbtc_sell = float(config["spread_threshold_hb_buy_cnbtc_sell"])
spread_threshold_hb_sell_cnbtc_buy = float(config["spread_threshold_hb_sell_cnbtc_buy"])
random_range = float(config["RandomRange"])
spread_threshold_yb_sell_okc_buy = float(config["spread_threshold_yb_sell_okc_buy"])
spread_threshold_yb_buy_okc_sell = float(config["spread_threshold_yb_buy_okc_sell"])
spread_threshold_okc_buy_hb_sell = float(config["spread_threshold_okc_buy_hb_sell"])
spread_threshold_okc_sell_hb_buy = float(config["spread_threshold_okc_sell_hb_buy"])
spread_threshold_okc_buy_cnbtc_sell = float(config["spread_threshold_okc_buy_cnbtc_sell"])
spread_threshold_okc_sell_cnbtc_buy = float(config["spread_threshold_okc_sell_cnbtc_buy"])
max_diff_thres = float(config["max_diff_thres"])
#######if coin # is lower than alert thres, it will increase the thres
alert_thres_coin = float(config["alert_thres_coin"])
alert_thres_money = float(config["alert_thres_money"])
thres_coin = float(config["thres_coin"])
thres_money = float(config["thres_money"])
#######max thres increase is slope*alert_thres
slope = float(config["alert_slope"])
# print max_diff_thres,alert_thres,slope
# spread_threshold = float(config["spread_threshold"])
# spread_threshold_minor = float(config["spread_threshold_minor"])
#####if we start a trade, we will accept all trades until the spread reaches the lowest spread threshold; after that, we cancel all remaining trades
lowest_spread_threshold = float(config["lowest_spread_threshold"])
trade_multiplier_ratio = float(config["TradeMultiplyRatio"])
# lowest_spread_threshold_minor = float(config["lowest_spread_threshold_minor"])
#####the trade price is max trade limitation*trade ratio behind the min/max price of ask/bid
trade_ratio = float(config["TradeAdvanceRatio"])
# trade_ratio_minor = float(config["TradeAdvanceRatio_minor"])
#####slippage
slippage = float(config["slippage"])
tmpThres = maxTradeLimitation*trade_ratio
# tmpThres_minor = maxTradeLimitation_minor*trade_ratio
offset_player = int(config["offset_player"])
# offset_player_minor = int(config["offset_player_minor"])
offset_coin = float(config["offset_coin"])
# offset_coin_minor = float(config["offset_coin_minor"])
########return 0 accumulated amount
########return 1 price
########return 2 list
def cnbtcThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += list[i][1]
if acc > thres+offset_coin:
return (thres,list[i][0],list)
return (acc,list[-1][0],list)
def ybThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += float(list[i][1])
if acc > thres+offset_coin:
return (thres,float(list[i][0]),list)
return (acc,float(list[-1][0]),list)
def hbThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += float(list[i][1])
if acc > thres+offset_coin:
return (thres,float(list[i][0]),list)
return (acc,float(list[-1][0]),list)
def okcThresCoin(thres,offset_coin,offset_player,list):
acc = 0
for i in range(offset_player,len(list)):
acc += list[i][1]
if acc > thres+offset_coin:
return (thres,list[i][0],list)
return (acc,list[-1][0],list)
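# Illustrative usage sketch (hedged, comments only): each *ThresCoin helper walks one
# side of an order book, skipping `offset_player` levels, and accumulates volume until
# it exceeds `thres + offset_coin`, returning (filled amount, price at that depth,
# the raw list). With a made-up bid book:
#
#     bids = [[2000.0, 0.5], [1999.5, 1.0], [1999.0, 3.0]]
#     cnbtcThresCoin(2.0, 0.0, 0, bids) # -> (2.0, 1999.0, bids)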
def ybRun():
while True:
yb = ybQue1.get()
if yb == None:
ybQue1.task_done()
break
else:
while True:
depth = yb.getDepth()
if depth:
break
depth["asks"].reverse()
ybQue2.put((ybThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),depth["timestamp"]))
ybQue2.put((ybThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),depth["timestamp"]))
ybQue1.task_done()
def okcRun():
while True:
okc = okcQue1.get()
if okc == None:
okcQue1.task_done()
break
else:
while True:
depth = okc.getDepth()
if depth:
break
depth["asks"].reverse()
okcQue2.put((okcThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),"-99999999"))
okcQue2.put((okcThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),"-99999999"))
okcQue1.task_done()
def hbRun():
while True:
hb = hbQue1.get()
if hb == None:
hbQue1.task_done()
break
else:
while True:
depth = hb.getDepth()
if depth and depth["status"] == "ok":
break
# depth["tick"]["asks"].reverse()
hbQue2.put((hbThresCoin(tmpThres,offset_coin,offset_player,depth["tick"]["bids"]),depth["ts"]/1000))
hbQue2.put((hbThresCoin(tmpThres,offset_coin,offset_player,depth["tick"]["asks"]),depth["ts"]/1000))
hbQue1.task_done()
def cnbtcRun():
while True:
cnbtc = cnbtcQue1.get()
if cnbtc == None:
cnbtcQue1.task_done()
break
else:
while True:
depth = cnbtc.getDepth()
if depth:
break
depth["asks"].reverse()
cnbtcQue2.put((cnbtcThresCoin(tmpThres,offset_coin,offset_player,depth["bids"]),depth["timestamp"]))
cnbtcQue2.put((cnbtcThresCoin(tmpThres,offset_coin,offset_player,depth["asks"]),depth["timestamp"]))
cnbtcQue1.task_done()
#######tradeque1[0]:obj
#######tradeque1[1]:buy or sell
#######tradeque1[2]:amount
#######tradeque1[3]:price
#######tradeque1[4]:limit_price
def ybTradeRun():
while True:
yb_tuple = ybTradeQue1.get()
money = 0
if yb_tuple == None:
ybTradeQue1.task_done()
break
yb = yb_tuple[0]
amount = yb_tuple[2]
remain = amount
price = yb_tuple[3]
if amount==0:
ybTradeQue2.put((0.0,0.0))
ybTradeQue1.task_done()
continue
sell = True
if yb_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = yb.sell(volume = amount,price=price-slippage)
else:
order = yb.buy(volume = amount, price = price + slippage)
if order!= None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
id = order["id"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = yb.getOrder(id)
if order!=None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
break
print "yb",order
if order["state"] == "done":
break
if order["state"] == "done":
if sell:
print "yunbi remain sell %f"%0.0
money+=amount*(price-slippage)
ybTradeQue2.put((0.0,money))
break
else:
print "yunbi remain buy 0.0"
money-=amount*(price+slippage)
ybTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
order = yb.deleteOrder(id)
print "yb",order
if order!=None:
if order.has_key("error"):
print "yb,delete",order
time.sleep(delay_time)
continue
break
while True:
order = yb.getOrder(id)
print "yb",order
if order!=None:
if order.has_key("error"):
time.sleep(delay_time)
print "yb",order
continue
if order["state"] != "wait":
break
else:
time.sleep(delay_time)
# break
#todo judge whether has been deleted
if sell:
money+=float(order["executed_volume"])*(price-slippage)
remain = float(order["remaining_volume"])
print "yunbi remain sell %f"%float(order["remaining_volume"])
else:
money-=float(order["executed_volume"])*(price+slippage)
remain = float(order["remaining_volume"])
print "yunbi remain buy %f"%float(order["remaining_volume"])
if remain <=0:
ybTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = yb.getDepth()
if depth:
depth["asks"].reverse()
break
if sell:
price_now = ybThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "price_now yb",price_now,yb_tuple[4]
if price_now<yb_tuple[4]:
ybTradeQue2.put((remain,money))
break
else:
price_now = ybThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "price_now yb",price_now
if price_now>yb_tuple[4]:
ybTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
ybTradeQue1.task_done()
def okcTradeRun():
while True:
okc_tuple = okcTradeQue1.get()
money = 0
if okc_tuple == None:
okcTradeQue1.task_done()
break
okc = okc_tuple[0]
amount = okc_tuple[2]
remain = amount
price = okc_tuple[3]
if amount==0:
okcTradeQue2.put((0.0,0.0))
okcTradeQue1.task_done()
continue
sell = True
if okc_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = okc.sell(volume = amount,price=price-slippage)
else:
order = okc.buy(volume = amount, price = price+slippage)
if order!= None:
if order["result"] != True:
print "okc",order
time.sleep(delay_time)
continue
id = order["order_id"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = okc.getOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
continue
break
print "okc",order
if order["orders"][0]["status"] == 2:
break
if order["orders"][0]["status"] == 2:
if sell:
print "okcoin remain sell %f"%0.0
money+=amount*(price-slippage)
okcTradeQue2.put((0.0,money))
break
else:
print "okcoin remain buy 0.0"
money-=amount*(price+slippage)
okcTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
order = okc.deleteOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
if order["error_code"]==10050:
break
continue
break
while True:
order = okc.getOrder(id)
if order!=None:
if order["result"] != True:
time.sleep(delay_time)
print "okc",order
continue
if order["orders"][0]["status"] == 2 or order["orders"][0]["status"]== -1:
break
else:
time.sleep(delay_time)
#todo judge whether has been deleted
if sell:
money+=float(order["orders"][0]["deal_amount"])*(price-slippage)
remain = float(order["orders"][0]["amount"]) - float(order["orders"][0]["deal_amount"])
print "okcoin remain sell %f"%remain
else:
money-=float(order["orders"][0]["deal_amount"])*(price+slippage)
remain = float(order["orders"][0]["amount"])-float(order["orders"][0]["deal_amount"])
print "okcoin remain buy %f"%remain
if remain<=0:
okcTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = okc.getDepth()
if depth:
depth["asks"].reverse()
break
if sell:
price_now = okcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "price_now okc",price_now,okc_tuple[4]
if price_now<okc_tuple[4]:
okcTradeQue2.put((remain,money))
break
else:
price_now = okcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "price_now okc",price_now
if price_now>okc_tuple[4]:
okcTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
okcTradeQue1.task_done()
def hbTradeRun():
while True:
hb_tuple = hbTradeQue1.get()
money = 0
if hb_tuple == None:
hbTradeQue1.task_done()
break
hb = hb_tuple[0]
amount = hb_tuple[2]
remain = amount
price = hb_tuple[3]
if amount==0:
hbTradeQue2.put((0.0,0.0))
hbTradeQue1.task_done()
continue
sell = True
if hb_tuple[1] == "buy":
sell = False
times = 10
while True:
order = None
if sell:
order = hb.sell(volume = amount,price=price-slippage)
#todo
if order!=None and order["status"] == "ok":
order = hb.place_order(order["data"])
else:
#todo
order = hb.buy(volume = amount, price = price + slippage)
if order!=None and order["status"] == "ok":
order = hb.place_order(order["data"])
if order!= None:
if order["status"]!="ok":
print "hb",order
time.sleep(delay_time)
continue
id = order["data"]
wait_times = 3
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = hb.getOrder(id)
if order!=None:
if order["status"]!="ok":
time.sleep(delay_time)
print "hb",order
continue
break
print "hb",order
if order["data"]["state"] == "filled":
break
#todo
if order["data"]["state"] == "filled":
if sell:
print "huobi remain sell %f"%0.0
money+=amount*(price-slippage)
hbTradeQue2.put((0.0,money))
break
else:
print "huobi remain buy 0.0"
money-=amount*(price+slippage)
hbTradeQue2.put((0.0,money))
break
else:
# order["state"] == "wait":
while True:
print id
order = hb.deleteOrder(id)
if order!=None:
if order["status"]!="ok":
if order['status'] == 'error' and order['err-code'] == 'order-orderstate-error':
break
print "hb",order
continue
break
while True:
order = hb.getOrder(id)
if order!=None:
if order["status"]!="ok":
time.sleep(delay_time)
print "hb",order
continue
print "hb",order
if order["data"]["state"] == "canceled" or order["data"]["state"] == "filled" or order["data"]["state"] == "partial-canceled" or order["data"]["state"] == "partial-filled":
break
else:
time.sleep(delay_time)
#todo judge whether has been deleted
if sell:
money+=float(order["data"]["field-amount"])*(price-slippage)
remain = float(order["data"]["amount"])-float(order["data"]["field-amount"])
print "huobi remain sell %f"%remain
else:
money-=float(order["data"]["field-amount"])*(price+slippage)
remain = float(order["data"]["amount"])-float(order["data"]["field-amount"])
print "huobi remain buy %f"%remain
if remain<=0:
hbTradeQue2.put((0.0,money))
break
print "get_price"
while True:
depth = hb.getDepth()
if depth:
break
if sell:
price_now = hbThresCoin(remain*trade_ratio,offset_coin,offset_player,depth['tick']["bids"])[1]
print "price_now hb",price_now,hb_tuple[4]
if price_now<hb_tuple[4]:
hbTradeQue2.put((remain,money))
break
else:
price_now = hbThresCoin(remain*trade_ratio,offset_coin,offset_player,depth['tick']["asks"])[1]
print "price_now hb",price_now
if price_now>hb_tuple[4]:
hbTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
hbTradeQue1.task_done()
def cnbtcTradeRun():
while True:
cnbtc_tuple = cnbtcTradeQue1.get()
if cnbtc_tuple == None:
cnbtcTradeQue1.task_done()
break
# print cnbtc_tuple
money = 0;
cnbtc = cnbtc_tuple[0]
amount = cnbtc_tuple[2]
remain = amount
price = cnbtc_tuple[3]
if amount==0:
cnbtcTradeQue2.put((0.0,0.0))
cnbtcTradeQue1.task_done()
continue
buy = True
if cnbtc_tuple[1] == "sell":
buy = False
times = 10
while True:
if buy:
order = cnbtc.buy(volume = amount,price=price+slippage)
else:
order = cnbtc.sell(volume=amount,price=price-slippage)
if order!= None:
if order.has_key("code") and order["code"] != 1000:
time.sleep(delay_time)
print "cnbtc",order
continue
id = order["id"]
wait_times = 5
while wait_times>0:
wait_times-=1
time.sleep(1)
while True:
order = cnbtc.getOrder(id)
if order!=None:
break
print "cnbtc",order
####2 is done
####
if order["status"] == 2:
break
if order["status"] == 2:
if buy:
print "cnbtc remain buy ",0.0
money-=amount*(price+slippage)
cnbtcTradeQue2.put((0.0,money))
else:
print "cnbtc remain sell 0.0"
money+=amount*(price-slippage)
cnbtcTradeQue2.put((0.0,money))
break
elif order["status"] == 0 or order["status"] == 3:
while True:
order = cnbtc.deleteOrder(id)
if order!=None:
if order.has_key("code") and order["code"] != 1000:
print json.dumps(order,ensure_ascii=False)
if order["code"] == 3001:
break
time.sleep(delay_time)
continue
break
while True:
order = cnbtc.getOrder(id)
if order!=None:
# print order
if order.has_key("code") and order["code"] != 1000:
print "cnbtc",order
time.sleep(delay_time)
continue
#todo judge whether is deleted
if order["status"]==1 or order["status"] == 2:
break
else:
time.sleep(delay_time)
print "cnbtc",order
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
else:
if buy:
money-=float(order["trade_amount"])*(price+slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain buy %f/%f"%(remain,float(order["total_amount"]))
else:
money+=float(order["trade_amount"])*(price-slippage)
remain = float(order["total_amount"]) - float(order["trade_amount"])
print "cnbtc remain sell %f/%f"%(remain,float(order["total_amount"]))
if remain<=0:
cnbtcTradeQue2.put((0.0,money))
break
print "get_depth"
while True:
depth = cnbtc.getDepth()
depth["asks"].reverse()
if depth:
break
if buy:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["asks"])[1]
print "prince_now cnbtc",price_now
if price_now>cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
else:
price_now = cnbtcThresCoin(remain*trade_ratio,offset_coin,offset_player,depth["bids"])[1]
print "prince_now cnbtc",price_now
if price_now<cnbtc_tuple[4]:
cnbtcTradeQue2.put((remain,money))
break
price = price_now
amount = remain
times-=1
cnbtcTradeQue1.task_done()
def ybAccountRun():
while True:
yb = ybAccountQue1.get()
yb_cny = 0
yb_eth = 0
while True:
yb_acc = yb.get_account()
if yb_acc!= None:
if yb_acc.has_key("error"):
time.sleep(delay_time)
print yb_acc
continue
break
for acc in yb_acc["accounts"]:
if acc["currency"] == "cny":
yb_cny=float(acc["balance"])
elif acc["currency"] == "eth":
yb_eth= float(acc["balance"])
ybAccountQue1.task_done()
ybAccountQue2.put((yb_cny,yb_eth))
def cnbtcAccountRun():
while True:
cnbtc = cnbtcAccountQue1.get()
cnbtc_cny = 0
cnbtc_eth = 0
while True:
cnbtc_acc = cnbtc.get_account()
if cnbtc_acc!= None:
if cnbtc_acc.has_key("code") and cnbtc_acc["code"] != 1000:
time.sleep(delay_time)
print cnbtc_acc
continue
break
cnbtc_eth=cnbtc_acc["result"]["balance"]["ETH"]["amount"]
cnbtc_cny+=cnbtc_acc["result"]["balance"]["CNY"]["amount"]
cnbtcAccountQue1.task_done()
cnbtcAccountQue2.put((cnbtc_cny,cnbtc_eth))
def okcAccountRun():
while True:
time.sleep(delay_time)
okc = okcAccountQue1.get()
okc_cny = 0
okc_eth = 0
while True:
okc_acc = okc.get_account()
if okc_acc!= None:
if okc_acc["result"]!=True:
time.sleep(delay_time)
print "okc",okc_acc
continue
break
okc_eth = float(okc_acc["info"]["funds"]["free"]["eth"])
okc_cny = float(okc_acc["info"]["funds"]["free"]["cny"])
# print okc_acc
okcAccountQue1.task_done()
okcAccountQue2.put((okc_cny,okc_eth))
def hbAccountRun():
while True:
hb = hbAccountQue1.get()
hb_cny = 0
hb_eth = 0
while True:
hb_acc = hb.get_account()
if hb_acc!= None:
if hb_acc["status"]!="ok":
print hb_acc
continue
break
for mon in hb_acc["data"]["list"]:
if mon["currency"]=="cny" and mon["type"] == "trade":
hb_cny = float(mon["balance"])
if mon["currency"] == "eth" and mon["type"] == "trade":
hb_eth = float(mon["balance"])
hbAccountQue1.task_done()
hbAccountQue2.put((hb_cny,hb_eth))
import sys
import numpy.matlib
def setThreshold(cny_list,eth_list,brokerage_fee,cash_fee,thres_list_now,thres_list_origin,number,price,tick_coin,name_list):
trade_multiplier = numpy.ones([number,number])
thres_list = thres_list_origin.copy()
sell_times = eth_list/tick_coin
buy_times = cny_list/price/tick_coin
trade_broker = numpy.add.outer(brokerage_fee,brokerage_fee)*price*1.1
trade_cash = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05
length = cny_list.shape[0]
print "buy_times",buy_times
print "sell_times",sell_times
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset = numpy.matlib.repmat(tmp,length,1)
tmp = buy_times.copy()
tmp[tmp>thres_money] = thres_money
tmp = (-tmp+thres_money)*5/thres_money
tmp[tmp>1] = 1
max_diff_thres_tmp = max(0,max_diff_thres)
tmp_mul = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print 123
offset_cash = -numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print tmp
# tmp = numpy.matlib.repmat(tmp.reshape(length,1),1,length)
# print tmp
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*slope
tmp[tmp>max_thres_limitation] = max_thres_limitation
offset += numpy.matlib.repmat(tmp.reshape(length,1),1,length)
tmp = sell_times.copy()
tmp[tmp>thres_coin] = thres_coin
tmp = (-tmp+thres_coin)*5/thres_coin
tmp[tmp>1] = 1
tmp_mul = numpy.matlib.repmat(tmp,length,1)
trade_multiplier+=tmp_mul*trade_multiplier_ratio
tmp = numpy.matlib.repmat(tmp,length,1)
# print 123
offset_cash -= numpy.multiply(tmp,numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*price*1.05)
# print offset
# buy_times<100
alertQue.put((buy_times,sell_times,number))
# offset[offset<max_diff_thres_tmp] = max_diff_thres_tmp
offset[offset>max_thres_limitation] = max_thres_limitation
print offset
# print offset
# print trade_broker,trade_cash,offset_cash
thres_list = trade_broker+trade_cash+offset_cash+max_diff_thres_tmp+offset+thres_list_origin
# print thres_list
thres_list[:,buy_times<=8] = 999999
thres_list[sell_times<=8,:] = 999999
buy_tmp = (thres_money-buy_times.copy())*slope
buy_tmp[buy_tmp<0] = 0
buy_tmp[buy_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
buy_tmp_n_n = numpy.matlib.repmat(buy_tmp.reshape(length, 1), 1, length)
sell_tmp = (thres_coin-sell_times.copy())*slope
sell_tmp[sell_tmp<0] = 0
sell_tmp[sell_tmp>max_diff_thres_tmp] = max_diff_thres_tmp
sell_tmp_n_n = numpy.matlib.repmat(sell_tmp,length,1)
tmp_n_n = numpy.maximum(sell_tmp_n_n,buy_tmp_n_n)
# print thres_list
# print tmp_n_n
thres_list -= tmp_n_n
# thres_list -= sell_tmp
numpy.fill_diagonal(thres_list,999999)
numpy.fill_diagonal(trade_multiplier,0)
trade_multiplier[trade_multiplier>2] = 2
# print trade_multiplier
# print thres_list
# thres_list = numpy.maximum.reduce([thres_list,(trade_broker+trade_cash)])
# print buy_times<=1
# print thres_list
# result = thres_list_origin.copy()
# result[:number,:number] = thres_list
# thres_list[2,0] = 0
# thres_list[2,1] = 0
# thres_list[1,2] = 0
# thres_list[0,2] = 0
# print thres_list
return thres_list,trade_multiplier
def alert():
while True:
alertTuple = alertQue.get()
buy_times = alertTuple[0]
sell_times = alertTuple[1]
number = alertTuple[2]
for i in range(number):
if open_platform[i]:
if buy_times[i] <= 8:
if money_status[i] == 0 or money_status[i] == 1:
for tel in tel_list:
res = message.send_sms("提醒:%s的账户完全没钱了" % name_list[i], tel)
print res
money_status[i] = 2
print >> sys.stderr, "%s has no money!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif buy_times[i] < alert_thres_money:
if money_status[i] == 0:
for tel in tel_list:
message.send_sms("提醒:%s快没钱了,只能买%f次了" % (name_list[i],buy_times[i]), tel)
money_status[i] = 1
print >> sys.stderr, "%s is low money!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
money_status[i] = 0
if sell_times[i] <= 8:
if coin_status[i] == 0 or coin_status[i] == 1:
for tel in tel_list:
message.send_sms("提醒:%s的账户完全没币了" % name_list[i], tel)
coin_status[i] = 2
print >> sys.stderr, "%s has no coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
elif sell_times[i] < alert_thres_coin:
if coin_status[i] == 0:
for tel in tel_list:
message.send_sms("提醒:%s快没币了,只能卖%f次了" % (name_list[i],sell_times[i]), tel)
coin_status[i] = 1
print >> sys.stderr, "%s is low coin!!!!!!!!!!!!!!!!!!!!!!" % name_list[i]
else:
coin_status[i] = 0
alertQue.task_done()
import HuoBi
import OKCoin
open_okc = open_platform[3]
open_yb = open_platform[1]
open_cnbtc = open_platform[0]
open_hb = open_platform[2]
if open_yb:
yb = YunBi.Yunbi(config,"LiChen")
print yb.get_account()
else:
yb = None
# import gzip
# from StringIO import StringIO
#
# buf = StringIO(acc["name"])
# f = gzip.GzipFile(fileobj=buf)
# print f.read()
# sss = acc["name"].encode("raw_unicode_escape").decode()
# print ss
# logging.info("YB Account "+json.dumps(yb.get_account(),ensure_ascii=False))
if open_cnbtc:
cnbtc = CNBTC.CNBTC(config)
print("cnbtc Account "+str(cnbtc.get_account()))
else:
cnbtc = None
if open_hb:
hb = HuoBi.HuoBi(config)
print("HB Account "+str(hb.get_account()))
else:
hb = None
if open_okc:
okc = OKCoin.OKCoin(config)
print("OKCoin Account "+str(okc.get_account()))
okc_thread = threading.Thread(target=okcRun)
okc_thread.setDaemon(True)
okc_thread.start()
else:
okc = None
if open_yb:
yb_thread = threading.Thread(target=ybRun)
yb_thread.setDaemon(True)
yb_thread.start()
if open_cnbtc:
cnbtc_thread = threading.Thread(target=cnbtcRun)
cnbtc_thread.setDaemon(True)
cnbtc_thread.start()
if open_hb:
hb_thread = threading.Thread(target=hbRun)
hb_thread.setDaemon(True)
hb_thread.start()
if open_okc:
okc_trade_thread = threading.Thread(target=okcTradeRun)
okc_trade_thread.setDaemon(True)
okc_trade_thread.start()
if open_yb:
yb_trade_thread = threading.Thread(target=ybTradeRun)
yb_trade_thread.setDaemon(True)
yb_trade_thread.start()
if open_cnbtc:
cnbtc_trade_thread = threading.Thread(target = cnbtcTradeRun)
cnbtc_trade_thread.setDaemon(True)
cnbtc_trade_thread.start()
if open_hb:
hb_trade_thread = threading.Thread(target=hbTradeRun)
hb_trade_thread.setDaemon(True)
hb_trade_thread.start()
if open_okc:
okc_account_thread = threading.Thread(target=okcAccountRun)
okc_account_thread.setDaemon(True)
okc_account_thread.start()
if open_yb:
yb_account_thread = threading.Thread(target=ybAccountRun)
yb_account_thread.setDaemon(True)
yb_account_thread.start()
if open_cnbtc:
cnbtc_account_thread = threading.Thread(target = cnbtcAccountRun)
cnbtc_account_thread.setDaemon(True)
cnbtc_account_thread.start()
if open_hb:
hb_account_thread = threading.Thread(target=hbAccountRun)
hb_account_thread.setDaemon(True)
hb_account_thread.start()
alertThread = threading.Thread(target=alert)
alertThread.setDaemon(True)
alertThread.start()
total_coin = 0
total_money = 0
tick = 0
last_total_eth = 0
last_total_cny = 0
first_total_eth = 0
first_total_cny = 0
first = True
platform_number = 4
name_list = ["CNBTC","YunBi","HuoBi","OKCoin"]
obj_list = [cnbtc,yb,hb,okc]
que1_list = [cnbtcQue1,ybQue1,hbQue1,okcQue1]
que2_list = [cnbtcQue2,ybQue2,hbQue2,okcQue2]
trade_que1_list = [cnbtcTradeQue1,ybTradeQue1,hbTradeQue1,okcTradeQue1]
trade_que2_list = [cnbtcTradeQue2,ybTradeQue2,hbTradeQue2,okcTradeQue2]
thres_list = numpy.array([[999999,spread_threshold_yb_buy_cnbtc_sell,spread_threshold_hb_buy_cnbtc_sell,spread_threshold_okc_buy_cnbtc_sell],
[spread_threshold_yb_sell_cnbtc_buy,999999,spread_threshold_yb_sell_hb_buy,spread_threshold_yb_sell_okc_buy],
[spread_threshold_hb_sell_cnbtc_buy,spread_threshold_yb_buy_hb_sell,9999999,spread_threshold_okc_buy_hb_sell],
[spread_threshold_okc_sell_cnbtc_buy,spread_threshold_yb_buy_okc_sell,spread_threshold_okc_sell_hb_buy,999999]])
thres_list_origin = thres_list.copy()
has_ts = [True,True,True,False]
platform_list = []
for i in range(platform_number):
platform_list.append(
{
"name":name_list[i],
"obj":obj_list[i],
"que1":que1_list[i],
"que2":que2_list[i],
"trade_que1":trade_que1_list[i],
"trade_que2":trade_que2_list[i],
"depth_buy":None,
"depth_sell":None,
"has_ts":has_ts[i]
}
)
brokerage_fee = numpy.asarray([0.0004,0.001,0.002,0.001])
cash_fee = numpy.asarray([0.001,0.001,0.002,0.002])
while True:
print 'tick',tick
for platform in platform_list:
if platform["obj"]!=None:
platform["que1"].put(platform["obj"])
if open_yb:
ybAccountQue1.put(yb)
if open_okc:
okcAccountQue1.put(okc)
if open_cnbtc:
cnbtcAccountQue1.put(cnbtc)
if open_hb:
hbAccountQue1.put(hb)
for platform in platform_list:
if platform["obj"]!=None:
platform["depth_sell"] = platform["que2"].get()
platform["depth_buy"] = platform["que2"].get()
###depth[0] is amount
###depth[1] is price
    ###depth[2] is list
max_diff = -1000
trade_info = dict()
average_price = 0
open_num = 0
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
open_num+=1
average_price+=platform_list[i]["depth_buy"][0][1]+platform_list[i]["depth_sell"][0][1]
average_price /= open_num*2.0/1.01
print 'average_price %f'%average_price
brokerage_trade = numpy.add.outer(brokerage_fee,brokerage_fee)*average_price
cash_trade = numpy.add.outer(cash_fee,numpy.zeros(cash_fee.shape[0]))*average_price
tick+=1
if tick % 1 == 0:
total_cny = 0
total_eth = 0
yb_cny = 0
yb_eth = 0
cnbtc_cny = 0
cnbtc_eth = 0
hb_cny = 0
hb_eth = 0
okc_cny = 0
okc_eth = 0
if open_yb:
yb_cny,yb_eth = ybAccountQue2.get()
print "yb_balance:%f %f"%(yb_eth,yb_cny)
if open_okc:
okc_cny,okc_eth = okcAccountQue2.get()
print "okc_balance:%f %f"%(okc_eth,okc_cny)
if open_hb:
hb_cny,hb_eth = hbAccountQue2.get()
print "hb balance:%f %f"%(hb_eth,hb_cny)
if open_cnbtc:
cnbtc_cny,cnbtc_eth = cnbtcAccountQue2.get()
print "cnbtc balance:%f %f"%(cnbtc_eth,cnbtc_cny)
total_cny = yb_cny+hb_cny+cnbtc_cny+okc_cny
total_eth = yb_eth+hb_eth+cnbtc_eth+okc_eth
balance.write("%s %f %f %f %f %f %f %f %f %f %f\n"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
cnbtc_eth,cnbtc_cny,yb_eth,yb_cny,hb_eth,hb_cny,okc_eth,okc_cny,total_eth,total_cny))
history.write("%s "%time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
for i in range(platform_number):
if platform_list[i]["obj"]!=None:
history.write("%f %f "%(platform_list[i]["depth_buy"][0][1],platform_list[i]["depth_sell"][0][1]))
else:
history.write('0 0 ')
history.write('\n')
cny_list = numpy.asarray([cnbtc_cny,yb_cny,hb_cny,okc_cny])
eth_list = numpy.asarray([cnbtc_eth,yb_eth,hb_eth,okc_eth])
last_total_eth = total_eth
last_total_cny = total_cny
if first:
first_total_cny = total_cny
first_total_eth = total_eth
first = False
# history.write("%s %f %f %f %f %f %f\n" % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
# yb_depth[0][1], cnbtc_depth[0][1], yb_depth[0][1] - cnbtc_depth[0][1],
# yb_depth_minor[0][1], cnbtc_depth_minor[0][1],
# cnbtc_depth_minor[0][1] - yb_depth_minor[0][1]))
balance.flush()
history.flush()
if tick%1 == 0:
thres_list,trade_multiplier = setThreshold(cny_list,eth_list,brokerage_fee,cash_fee,thres_list,thres_list_origin,platform_number,average_price,maxTradeLimitation,name_list)
# print thres_list
i1 = None
j1 = None
for i in range(platform_number):
for j in range(platform_number):
if i!=j and platform_list[i]["obj"]!=None and platform_list[j]["obj"]!=None:
# if platform_list[i]["has_ts"] and platform_list[j]["has_ts"]:
# print i,j,int(platform_list[i]["depth_sell"][1]),int(platform_list[j]["depth_buy"][1])
# if (int(platform_list[i]["depth_sell"][1])-int(platform_list[j]["depth_buy"][1]))>5:
# continue
# print platform_list[i],platform_list[j]
if platform_list[i]["depth_sell"][0][1] - platform_list[j]["depth_buy"][0][1]>thres_list[i,j] and platform_list[i]["depth_sell"][0][1] - platform_list[j]["depth_buy"][0][1]-thres_list[i,j]>max_diff:
max_diff = platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1]-thres_list[i,j]
trade_info["sell_depth"] = platform_list[i]["depth_sell"]
trade_info["buy_depth"] = platform_list[j]["depth_buy"]
trade_info["sell_name"] = platform_list[i]["name"]
trade_info["buy_name"] = platform_list[j]["name"]
trade_info["sell_que1"] = platform_list[i]["trade_que1"]
trade_info["sell_que2"] = platform_list[i]["trade_que2"]
trade_info["buy_que1"] = platform_list[j]["trade_que1"]
trade_info["buy_que2"] = platform_list[j]["trade_que2"]
trade_info["sell_obj"] = platform_list[i]["obj"]
trade_info["buy_obj"]=platform_list[j]["obj"]
i1 = i
j1 = j
if max_diff>0:
print "max_diff %f"%max_diff
buy_depth = trade_info["buy_depth"]
sell_depth = trade_info["sell_depth"]
# print("BuySide:%s timestamp:%s amount:\t%f price:\t%f"%(trade_info["buy_name"],buy_depth[1],buy_depth[0][0],buy_depth[0][1],str(buy_depth[0][2])))
# print('SellSide:%s timestamp:%s amount:\t%f price:\t%f'%(trade_info["sell_name"],sell_depth[1],sell_depth[0][0],sell_depth[0][1],str(sell_depth[0][2])))
# print 'BuySide:%s timestamp:%s amount:\t%f price:\t%f asks:%s'%(trade_info["buy_name"],buy_depth[1],buy_depth[0][0],buy_depth[0][1],str(buy_depth[0][2]))
# print 'SellSide:%s timestamp:%s amount:\t%f price:\t%f bids:%s'%(trade_info["sell_name"],sell_depth[1],sell_depth[0][0],sell_depth[0][1],str(sell_depth[0][2]))
amount = int(min(buy_depth[0][0],sell_depth[0][0])*1.0/trade_ratio*trade_multiplier[i1,j1]*100)/100.0
amount +=int((random.random()-0.5)*2*(random_range+0.01)*100)/100.0
if amount<0:
amount = 0
amount_buy=amount
amount_sell=amount_buy
limit = (buy_depth[0][1]+sell_depth[0][1])*1.0/2.0
if total_coin>0.0001:
amount_buy = max(amount_buy-total_coin,0)
elif total_coin<-0.0001:
amount_sell = max(amount_sell+total_coin,0)
print "%s buy %f coins at %f and limit %f" %(trade_info["buy_name"],amount_buy,buy_depth[0][1],limit-lowest_spread_threshold/2.0)
trade_info["buy_que1"].put((trade_info["buy_obj"],"buy",amount_buy,buy_depth[0][1],limit-lowest_spread_threshold/2.0))
print "%s sell %f coins at %f and limit %f" %(trade_info["sell_name"],amount_sell,sell_depth[0][1],limit+lowest_spread_threshold/2.0)
trade_info["sell_que1"].put((trade_info["sell_obj"],"sell",amount_sell,sell_depth[0][1],limit+lowest_spread_threshold/2.0))
sell_remain = trade_info["sell_que2"].get()
buy_remain = trade_info["buy_que2"].get()
# output.write('%f, %f, %f, %f\n'%(sell_remain[0]-amount_sell,amount_buy-buy_remain[0],buy_remain[1],sell_remain[1]))
# output.flush()
total_coin+=sell_remain[0]-amount_sell-buy_remain[0]+amount_buy
total_money+=sell_remain[1]+buy_remain[1]
print "%s_remain:%f\t %s_remain:%f,total_remain:%f"%(trade_info["buy_name"],buy_remain[0],trade_info["sell_name"],sell_remain[0],maxCoin)
print"coin:%f,money:%f"%(total_coin,total_money)
maxCoin-=max(sell_remain[0],buy_remain[0])
# if maxCoin<0:
# hbQue1.put(None)
# cnbtcQue1.put(None)
# hbTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
else:
# average_price = 0
for i in range(platform_number):
for j in range(platform_number):
if i!=j and platform_list[i]["obj"]!=None and platform_list[j]["obj"]!=None:
print "no trade %s sell:%f %s buy:%f diff:%15f thres:%20f diff_brokerage:%20f"%(platform_list[i]["name"],platform_list[i]["depth_sell"][0][1],platform_list[j]["name"],platform_list[j]["depth_buy"][0][1],
platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1],thres_list[i,j],platform_list[i]["depth_sell"][0][1]-platform_list[j]["depth_buy"][0][1]-thres_list[i,j])
# average_price+=platform_list[i]["depth_buy"][0][1]+platform_list[i]["depth_sell"][0][1]
# average_price/=2.0*platform_number
print average_price
# print "no trade yb sell:%f cnbtc buy:%f diff:%f"%(yb_depth_sell[0][1],cnbtc_depth_buy[0][1],yb_depth_sell[0][1]-cnbtc_depth_buy[0][1])
# print "no trade hb sell:%f cnbtc buy:%f diff:%f"%(hb_depth_sell[0][1],cnbtc_depth_buy[0][1],hb_depth_sell[0][1]-cnbtc_depth_buy[0][1])
# print "no trade yb buy:%f cnbtc sell:%f diff:%f"%(yb_depth_buy[0][1],cnbtc_depth_sell[0][1],cnbtc_depth_sell[0][1]-yb_depth_buy[0][1])
# print "no trade hb buy:%f cnbtc sell:%f diff:%f"%(hb_depth_buy[0][1],cnbtc_depth_sell[0][1],cnbtc_depth_sell[0][1]-hb_depth_buy[0][1])
# print "no trade yb buy:%f hb sell:%f diff:%f"%(yb_depth_buy[0][1],hb_depth_sell[0][1],hb_depth_sell[0][1]-yb_depth_buy[0][1])
# print "no trade hb buy:%f yb sell:%f diff:%f"%(hb_depth_buy[0][1],yb_depth_sell[0][1],yb_depth_sell[0][1]-hb_depth_buy[0][1])
print "balance %f %f diff: %f %f %f first:%f %f"%(total_eth,total_cny, total_eth - last_total_eth,total_cny - last_total_cny,(total_eth - last_total_eth)*2000.0,
total_eth - first_total_eth,total_cny - first_total_cny)
print '\n'
#
# if hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_hb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# if cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_hb_buy_cnbtc_sell and abs(int(hb_depth_buy[1])-int(cnbtc_depth_sell[1])<=3) and cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["buy_obj"] = hb
# trade_info["sell_obj"]=cnbtc
# if hb_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_buy_hb_sell and abs(int(yb_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = hb_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = hb_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "HuoBi"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = hbTradeQue1
# trade_info["sell_que2"] = hbTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = hb
# trade_info["buy_obj"]=yb
# if yb_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_yb_sell_hb_buy and abs(int(hb_depth_buy[1])-int(yb_depth_sell[1])<=3) and yb_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=hb
# if yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(yb_depth_sell[1])<=3) and yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_sell[1])-int(yb_depth_buy[1])<=3) and cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = cnbtc
# trade_info["buy_obj"]=yb
# if open_okc:
# if okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_okc_sell_cnbtc_buy and okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = okc_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = okc_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "OKCoin"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = okcTradeQue1
# trade_info["sell_que2"] = okcTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = okc
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]>spread_threshold_okc_buy_cnbtc_sell and cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-okc_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = okc_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "OKCoin"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = okcTradeQue1
# trade_info["buy_que2"] = okcTradeQue2
# trade_info["buy_obj"] = okc
# trade_info["sell_obj"]=cnbtc
# if hb_depth_sell[0][1]-okc_depth_buy[0][1]>spread_threshold_okc_buy_hb_sell and hb_depth_sell[0][1]-okc_depth_buy[0][1]>max_diff:
# max_diff = hb_depth_sell[0][1]-okc_depth_buy[0][1]
# trade_info["sell_depth"] = hb_depth_sell
# trade_info["buy_depth"] = okc_depth_buy
# trade_info["sell_name"] = "HuoBi"
# trade_info["buy_name"] = "OKCoin"
# trade_info["sell_que1"] = hbTradeQue1
# trade_info["sell_que2"] = hbTradeQue2
# trade_info["buy_que1"] = okcTradeQue1
# trade_info["buy_que2"] = okcTradeQue2
# trade_info["sell_obj"] = hb
# trade_info["buy_obj"]=okc
# if okc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_okc_sell_hb_buy and okc_depth_sell[0][1]-hb_depth_buy[0][1]>max_diff:
# max_diff = okc_depth_sell[0][1]-hb_depth_buy[0][1]
# trade_info["sell_depth"] = okc_depth_sell
# trade_info["buy_depth"] = hb_depth_buy
# trade_info["sell_name"] = "OKCoin"
# trade_info["buy_name"] = "HuoBi"
# trade_info["sell_que1"] = okcTradeQue1
# trade_info["sell_que2"] = okcTradeQue2
# trade_info["buy_que1"] = hbTradeQue1
# trade_info["buy_que2"] = hbTradeQue2
# trade_info["sell_obj"] = okc
# trade_info["buy_obj"]=hb
# if yb_depth_sell[0][1]-okc_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# max_diff = yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]
# trade_info["sell_depth"] = yb_depth_sell
# trade_info["buy_depth"] = cnbtc_depth_buy
# trade_info["sell_name"] = "YunBi"
# trade_info["buy_name"] = "CNBTC"
# trade_info["sell_que1"] = ybTradeQue1
# trade_info["sell_que2"] = ybTradeQue2
# trade_info["buy_que1"] = cnbtcTradeQue1
# trade_info["buy_que2"] = cnbtcTradeQue2
# trade_info["sell_obj"] = yb
# trade_info["buy_obj"]=cnbtc
# if cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>max_diff:
# max_diff = cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]
# trade_info["sell_depth"] = cnbtc_depth_sell
# trade_info["buy_depth"] = yb_depth_buy
# trade_info["sell_name"] = "CNBTC"
# trade_info["buy_name"] = "YunBi"
# trade_info["sell_que1"] = cnbtcTradeQue1
# trade_info["sell_que2"] = cnbtcTradeQue2
# trade_info["buy_que1"] = ybTradeQue1
# trade_info["buy_que2"] = ybTradeQue2
# trade_info["sell_obj"] = cnbtc
# trade_info["buy_obj"]=yb
# if hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_hb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(hb_depth_sell[1])<=3) and hb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>max_diff:
# print "start trade major"
#
# elif yb_depth_sell[0][1]-cnbtc_depth_buy[0][1]>spread_threshold_yb_sell_cnbtc_buy and abs(int(cnbtc_depth_buy[1])-int(yb_depth_sell[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f asks:%s'%(cnbtc_depth_buy[1],cnbtc_depth_buy[0][0],cnbtc_depth_buy[0][1],str(cnbtc_depth_buy[0][2]))
# print 'YUNBI: timestamp:%s amount:\t%f price:\t%f bids:%s'%(yb_depth_sell[1],yb_depth_sell[0][0],yb_depth_sell[0][1],str(yb_depth_sell[0][2]))
# print "start trade major"
# amount = min(cnbtc_depth_buy[0][0],yb_depth_sell[0][0])*1.0/trade_ratio
# amount_buy=amount
# amount_sell=amount_buy
# limit = (cnbtc_depth_buy[0][1]+yb_depth_sell[0][1])*1.0/2.0
# if total_coin>0.0001:
# amount_buy = max(amount_buy-total_coin,0)
# elif total_coin<-0.0001:
# amount_sell = max(amount_sell+total_coin,0)
# print "cnbtc buy %f coins at %f and limit %f" %(amount_buy,cnbtc_depth_buy[0][1],limit-lowest_spread_threshold/2.0)
# cnbtcTradeQue1.put((cnbtc,"buy",amount_buy,cnbtc_depth_buy[0][1],limit-lowest_spread_threshold/2.0))
# print "yb sell %f coins at %f and limit %f" %(amount_sell,yb_depth_sell[0][1],limit+lowest_spread_threshold/2.0)
# ybTradeQue1.put((yb,"sell",amount_sell,yb_depth_sell[0][1],limit+lowest_spread_threshold/2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# yb_remain = ybTradeQue2.get()
# output.write('%f, %f, %f, %f\n'%(yb_remain[0]-amount_sell,amount_buy-cnbtc_remain[0],yb_remain[1],cnbtc_remain[1]))
# output.flush()
# total_coin+=yb_remain[0]-amount_sell-cnbtc_remain[0]+amount_buy
# total_money+=yb_remain[1]+cnbtc_remain[1]
# print "cnbtc_remain:%f\t yb_remain:%f,total_remain:%f"%(cnbtc_remain[0],yb_remain[0],maxCoin)
# print"coin:%f,money:%f"%(total_coin,total_money)
# maxCoin-=max(yb_remain[0],cnbtc_remain[0])
# if maxCoin<0:
# ybQue1.put(None)
# cnbtcQue1.put(None)
# ybTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
#
# # elif False:
# elif cnbtc_depth_sell[0][1]-yb_depth_buy[0][1]>spread_threshold_yb_buy_cnbtc_sell and abs(int(cnbtc_depth_sell[1])-int(yb_depth_buy[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f bids:%s'%(cnbtc_depth_sell[1],cnbtc_depth_sell[0][0],cnbtc_depth_sell[0][1],str(cnbtc_depth_sell[0][2]))
# print 'YUNBI: timestamp:%s amount:\t%f price:\t%f asks:%s'%(yb_depth_buy[1],yb_depth_buy[0][0],yb_depth_buy[0][1],str(yb_depth_buy[0][2]))
# print "start trade minor"
# amount = min(cnbtc_depth_sell[0][0], yb_depth_buy[0][0]) * 1.0 / trade_ratio
# amount_buy = amount
# amount_sell = amount_buy
# limit = (cnbtc_depth_sell[0][1] + yb_depth_buy[0][1]) * 1.0 / 2.0
# if total_coin > 0.01:
# amount_buy = max(amount_buy - total_coin, 0)
# elif total_coin < -0.01:
# amount_sell = max(amount_sell + total_coin, 0)
# print "cnbtc sell %f coins at %f and limit %f" % (amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold/ 2.0)
# cnbtcTradeQue1.put((cnbtc, "sell", amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold / 2.0))
# print "yb buy %f coins at %f and limit %f" % (amount_buy, yb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0)
# ybTradeQue1.put(
# (yb, "buy", amount_buy, yb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# yb_remain = ybTradeQue2.get()
# output.write('%f, %f, %f, %f\n' % (
# amount_buy - yb_remain[0], cnbtc_remain[0] - amount_sell, yb_remain[1], cnbtc_remain[1]))
# total_coin += -yb_remain[0] - amount_sell + cnbtc_remain[0] + amount_buy
# total_money += yb_remain[1] + cnbtc_remain[1]
# print "cnbtc_remain:%f\t yb_remain:%f,total_remain:%f" % (cnbtc_remain[0], yb_remain[0], maxCoin)
# print"coin:%f,money:%f" % (total_coin, total_money)
# maxCoin -= max(yb_remain[0], cnbtc_remain[0])
# if maxCoin < 0:
# ybQue1.put(None)
# cnbtcQue1.put(None)
# ybTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
# # elif False:
# elif cnbtc_depth_sell[0][1]-hb_depth_buy[0][1]>spread_threshold_hb_buy_cnbtc_sell and abs(int(cnbtc_depth_sell[1])-int(hb_depth_buy[1])<=3):
# print 'CNBTC: timestamp:%s amount:\t%f price:\t%f bids:%s'%(cnbtc_depth_sell[1],cnbtc_depth_sell[0][0],cnbtc_depth_sell[0][1],str(cnbtc_depth_sell[0][2]))
# print 'HuoBI: timestamp:%s amount:\t%f price:\t%f asks:%s'%(hb_depth_buy[1],hb_depth_buy[0][0],hb_depth_buy[0][1],str(hb_depth_buy[0][2]))
# print "start trade minor"
# amount = min(cnbtc_depth_sell[0][0], hb_depth_buy[0][0]) * 1.0 / trade_ratio
# amount_buy = amount
# amount_sell = amount_buy
# limit = (cnbtc_depth_sell[0][1] + hb_depth_buy[0][1]) * 1.0 / 2.0
# if total_coin > 0.01:
# amount_buy = max(amount_buy - total_coin, 0)
# elif total_coin < -0.01:
# amount_sell = max(amount_sell + total_coin, 0)
# print "cnbtc sell %f coins at %f and limit %f" % (amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold/ 2.0)
# cnbtcTradeQue1.put((cnbtc, "sell", amount_sell, cnbtc_depth_sell[0][1], limit + lowest_spread_threshold / 2.0))
# print "hb buy %f coins at %f and limit %f" % (amount_buy, hb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0)
# hbTradeQue1.put(
# (hb, "buy", amount_buy, hb_depth_buy[0][1], limit - lowest_spread_threshold / 2.0))
# cnbtc_remain = cnbtcTradeQue2.get()
# hb_remain = hbTradeQue2.get()
# output.write('%f, %f, %f, %f\n' % (
# amount_buy - hb_remain[0], cnbtc_remain[0] - amount_sell, hb_remain[1], cnbtc_remain[1]))
# total_coin += -hb_remain[0] - amount_sell + cnbtc_remain[0] + amount_buy
# total_money += hb_remain[1] + cnbtc_remain[1]
# print "cnbtc_remain:%f\t hb_remain:%f,total_remain:%f" % (cnbtc_remain[0], hb_remain[0], maxCoin)
# print"coin:%f,money:%f" % (total_coin, total_money)
# maxCoin -= max(hb_remain[0], cnbtc_remain[0])
# if maxCoin < 0:
# hbQue1.put(None)
# cnbtcQue1.put(None)
# hbTradeQue1.put(None)
# cnbtcTradeQue1.put(None)
# break
# else:
# # print "total coin: %f total_cny %f"%(total_eth,total_cny)
# # print "yunbi ",str(yb.get_account())
# # print "cnbtc ",str(cnbtc.get_account())
# print cnbtc.get_account()
# cnbtc.getDepth()
# print cnbtc.buy(volume=0.01,price=1461)
# print cnbtc.get_account()
# hft = HaiFengTeng.HaiFengTeng(config)
# hft.login()
# yb = YunBi.Yunbi(config,"YunBi2")
# yb.get_account()
# yb.buy(volume=0.001,price=9999.0)
# yb.getOrder()
# print yb.getDepth()
| 2.171875 | 2 |
startuptweet.py | cudmore/startupnotify | 0 | 6677 | #!/usr/bin/python3
"""
Author: <NAME>
Date: 20181013
Purpose: Send a Tweet with IP and MAC address of a Raspberry Pi
Install:
pip3 install tweepy
Usage:
python3 startuptweet.py 'this is my tweet'
"""
import tweepy
import sys
import socket
import subprocess
from uuid import getnode as get_mac
from datetime import datetime
# Create variables for each key, secret, token
from my_config import hash_tag
from my_config import consumer_key
from my_config import consumer_secret
from my_config import access_token
from my_config import access_token_secret
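# my_config is assumed to be a small local module holding the Twitter credentials and
# the hashtag appended to each tweet; a minimal sketch with placeholder values only:
#
#   # my_config.py
#   hash_tag = '#raspberrypi'
#   consumer_key = 'YOUR_CONSUMER_KEY'
#   consumer_secret = 'YOUR_CONSUMER_SECRET'
#   access_token = 'YOUR_ACCESS_TOKEN'
#   access_token_secret = 'YOUR_ACCESS_TOKEN_SECRET'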
message = ''
if len( sys.argv ) > 1:
message = sys.argv[1]
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#
thetime = datetime.now().strftime('%Y%m%d %H:%M:%S')
ip = subprocess.check_output(['hostname', '--all-ip-addresses'])
ip = ip.decode('utf-8').strip()
hostname = socket.gethostname()
mac = get_mac()
mac = hex(mac)
tweet = thetime + ' ' + hostname + ' ' + ip + ' ' + mac + ' ' + message + ' ' + hash_tag
print('tweeting:', tweet)
api.update_status(status=tweet)
| 3.375 | 3 |
distributed/db.py | VW-Stephen/pySpiderScrape | 0 | 6678 | #!/usr/bin/python
from bs4 import BeautifulSoup
import sqlite3
class DB:
"""
Abstraction for the profile database
"""
def __init__(self, filename):
"""
Creates a new connection to the database
filename - The name of the database file to use
"""
self.Filename = filename
self.Connection = sqlite3.connect(filename)
self.Cursor = self.Connection.cursor()
def SaveProfile(self, data):
"""
Saves the profile to the database
data - A dictionary of profile information
"""
self.Cursor.execute("INSERT INTO profiles (url, e0, e1, e2, e3, e4, e5, e6, e7, e8, gender, age, orientation, status, location) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (data['url'], data['e0'], data['e1'], data['e2'], data['e3'], data['e4'], data['e5'], data['e6'], data['e7'], data['e8'], data['gender'], data['age'], data['orientation'], data['status'], data['location']))
self.Connection.commit()
def HasVisited(self, url):
"""
Returns true if the given URL is in the database, false otherwise
url - The URL to check
"""
self.Cursor.execute("SELECT 1 FROM profiles WHERE url = ? LIMIT 1", (url,))
return self.Cursor.fetchone() is not None
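# A minimal usage sketch; the schema below is inferred from the INSERT statement in
# SaveProfile, and the real database may use different column types or constraints.
if __name__ == '__main__':
    db = DB(':memory:')
    db.Cursor.execute("CREATE TABLE profiles (url TEXT, e0 TEXT, e1 TEXT, e2 TEXT,"
                      " e3 TEXT, e4 TEXT, e5 TEXT, e6 TEXT, e7 TEXT, e8 TEXT,"
                      " gender TEXT, age TEXT, orientation TEXT, status TEXT, location TEXT)")
    profile = dict(('e%d' % i, 'answer %d' % i) for i in range(9))
    profile.update({'url': 'http://example.com/profile/1', 'gender': 'F', 'age': '30',
                    'orientation': 'straight', 'status': 'single', 'location': 'Anywhere, USA'})
    db.SaveProfile(profile)
    print(db.HasVisited('http://example.com/profile/1'))  # True
    print(db.HasVisited('http://example.com/profile/2'))  # False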
| 3.390625 | 3 |
private/scripts/recheck-invalid-handles.py | bansal-shubham/stopstalk-deployment | 0 | 6679 | """
Copyright (c) 2015-2019 <NAME>(<EMAIL>), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import requests, bs4
import sites
# Constants to be used in case of request failures
SERVER_FAILURE = "SERVER_FAILURE"
NOT_FOUND = "NOT_FOUND"
OTHER_FAILURE = "OTHER_FAILURE"
REQUEST_FAILURES = (SERVER_FAILURE, NOT_FOUND, OTHER_FAILURE)
def get_invalid_handle_method(site):
site_class = getattr(sites, site.lower())
invalid_handle_method = getattr(site_class.Profile, "is_invalid_handle")
return invalid_handle_method
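# Each key in current.SITES is assumed to name a module in the local `sites` package
# that exposes a Profile class whose is_invalid_handle(handle) returns True when the
# handle does not exist on that site (the sanity assert below relies on this), roughly:
#
#   class Profile(object):
#       @staticmethod
#       def is_invalid_handle(handle):
#           # fetch the profile page and report whether the site says it is missing
#           ...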
if __name__ == "__main__":
ihtable = db.invalid_handle
atable = db.auth_user
cftable = db.custom_friend
stable = db.submission
nrtable = db.next_retrieval
mapping = {}
handle_to_row = {}
for site in current.SITES:
mapping[site] = get_invalid_handle_method(site)
handle_to_row[site] = {}
impossiblehandle = "thisreallycantbeahandle308"
assert(all(map(lambda site: get_invalid_handle_method(site)(impossiblehandle), current.SITES.keys())))
def populate_handle_to_row(table):
for row in db(table).select():
for site in current.SITES:
site_handle = row[site.lower() + "_handle"]
if site_handle:
if handle_to_row[site].has_key(site_handle):
handle_to_row[site][site_handle].append(row)
else:
handle_to_row[site][site_handle] = [row]
populate_handle_to_row(atable)
populate_handle_to_row(cftable)
# for site in current.SITES:
# print site
# for site_handle in handle_to_row[site]:
# print "\t", site_handle
# for row in handle_to_row[site][site_handle]:
# print "\t\t", row.first_name, row.last_name, row.stopstalk_handle
update_dict = {"stopstalk_rating": 0,
"stopstalk_prev_rating": 0,
"per_day": 0.0,
"per_day_change": "0.0",
"authentic": False}
final_delete_query = False
cnt = 0
for row in db(ihtable).iterselect():
# If not an invalid handle anymore
if handle_to_row[row.site].has_key(row.handle) and mapping[row.site](row.handle) is False:
cnt += 1
print row.site, row.handle, "deleted"
for row_obj in handle_to_row[row.site][row.handle]:
print "\t", row_obj.stopstalk_handle, "updated"
update_dict[row.site.lower() + "_lr"] = current.INITIAL_DATE
row_obj.update_record(**update_dict)
if "user_id" in row_obj:
# Custom user
db(nrtable.custom_user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
else:
db(nrtable.user_id == row_obj.id).update(**{row.site.lower() + "_delay": 0})
final_delete_query |= ((stable.site == row.site) & \
(stable.stopstalk_handle == row_obj.stopstalk_handle))
del update_dict[row.site.lower() + "_lr"]
row.delete_record()
if cnt >= 10:
if final_delete_query:
db(final_delete_query).delete()
cnt = 0
final_delete_query = False
if final_delete_query:
db(final_delete_query).delete()
| 1.914063 | 2 |
onnxmltools/convert/keras/_parse.py | gpminsuk/onnxmltools | 1 | 6680 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numbers

import tensorflow as tf
from keras.models import Model
from keras.layers import Layer, InputLayer
from ...proto import onnx
from ..common._container import KerasModelContainer
from ..common._topology import Topology
from ..common.data_types import *
def _extract_inbound_nodes(model):
if hasattr(model, 'inbound_nodes'):
return model.inbound_nodes
elif hasattr(model, '_inbound_nodes'):
return model._inbound_nodes
else:
raise ValueError('Failed to find inbound_nodes and _inbound_nodes when parsing Keras model')
def extract_model_input_and_output_shapes(model, default_batch_size):
if hasattr(model, 'input_shape'):
if not isinstance(model.input_shape, list):
input_shapes = [list(model.input_shape)]
else:
input_shapes = [list(shape) for shape in model.input_shape]
elif hasattr(model, 'input_shapes'):
input_shapes = [list(shape) for shape in model.input_shapes]
else:
raise ValueError('Failed to extract model input shape(s)')
for shape in input_shapes:
if not isinstance(shape[0], numbers.Integral):
shape[0] = default_batch_size
if hasattr(model, 'output_shape'):
if not isinstance(model.output_shape, list):
output_shapes = [list(model.output_shape)]
else:
output_shapes = [list(shape) for shape in model.output_shape]
elif hasattr(model, 'output_shapes'):
output_shapes = [list(shape) for shape in model.output_shapes]
else:
raise ValueError('Failed to extract model output shape(s)')
for shape in output_shapes:
if not isinstance(shape[0], numbers.Integral):
shape[0] = default_batch_size
return input_shapes, output_shapes
def determine_tensor_type(tensor, default_batch_size, keras_shape=None):
# keras_shape can overwrite the shaped defined in Tensorflow tensor
if keras_shape is None:
tensor_shape = [d.value if d.value is not None else 'None' for d in tensor.shape]
else:
tensor_shape = [d if d is not None else 'None' for d in keras_shape]
# Adjust batch size if needed
if tensor_shape[0] == 'None':
tensor_shape[0] = default_batch_size
# Determine the tensor's element type
tensor_type = tensor.dtype
if tensor_type in [tf.int8, tf.int16, tf.int32, tf.int64]:
return Int64TensorType(shape=tensor_shape)
elif tensor_type in [tf.float16, tf.float32, tf.float64]:
return FloatTensorType(shape=tensor_shape)
else:
raise ValueError('Unable to find out a correct type for tensor %s' % tensor)
def parse_keras(model, initial_types=None, targeted_onnx=onnx.__version__):
'''
The main parsing function of Keras Model and Sequential objects.
:param model: A Keras Model or Sequential object
:param initial_types: A list providing some types for some root variables. Each element is a tuple of a variable
name and a type defined in data_types.py.
:param targeted_onnx: a version string such as `1.1.2` or `1.2.1` for specifying the ONNX version used to produce
the output model.
:return: a Topology object. It's an intermediate representation of the input Keras model
'''
raw_model_container = KerasModelContainer(model)
topology = Topology(raw_model_container, default_batch_size=1, initial_types=initial_types,
targeted_onnx=targeted_onnx)
scope = topology.declare_scope('__root__')
# Each inbound node defines an evaluation of the underlining model (if the model is called multiple times, it may
# contain several inbound nodes). According to the tensors specified in those inbound nodes, we declare the roots
# and leaves of the computational graph described by the Keras input model.
for node in _extract_inbound_nodes(model):
input_shapes, output_shapes = extract_model_input_and_output_shapes(model, topology.default_batch_size)
# Declare inputs for a specific model execution
for tensor, shape in zip(node.input_tensors, input_shapes):
raw_model_container.add_input_name(tensor.name)
tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
scope.get_local_variable_or_declare_one(tensor.name, tensor_type)
# Declare outputs for a specific model execution
for tensor, shape in zip(node.output_tensors, output_shapes):
raw_model_container.add_output_name(tensor.name)
tensor_type = determine_tensor_type(tensor, topology.default_batch_size, list(shape))
scope.get_local_variable_or_declare_one(tensor.name, tensor_type)
# For each model execution, we call a parsing function to create a computational (sub-)graph because ONNX has no
# model/layer sharing.
for node in _extract_inbound_nodes(model):
_parse_keras(topology, scope, model, node)
topology.root_names = [variable.onnx_name for variable in scope.variables.values()]
return topology
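# A minimal sketch of how parse_keras is typically driven, assuming an already-built
# Keras model; the architecture below is arbitrary and only illustrates the call:
#
#   from keras.models import Sequential
#   from keras.layers import Dense
#
#   model = Sequential([Dense(4, activation='relu', input_shape=(8,)), Dense(1)])
#   topology = parse_keras(model)  # intermediate graph consumed by the ONNX converter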
def _parse_keras(topology, parent_scope, model, inbound_node):
if isinstance(model, Model):
scope = topology.declare_scope('scope')
# Declare output variables so that they can be connected with the variables produced in layers and sub-models
for layer in model.layers:
for node in _extract_inbound_nodes(layer):
for tensor in node.output_tensors:
tensor_type = determine_tensor_type(tensor, topology.default_batch_size)
scope.declare_local_variable(tensor.name, tensor_type)
# Recursively call the parsing function
for layer in model.layers:
for node in _extract_inbound_nodes(layer):
_parse_keras(topology, scope, layer, node)
# Connect the variables declared when parsing the input model and the actual model inputs. inbound_node has the
# actual inputs while the whole graph is declared only via the first inbound node of the input model.
# That is, for a shared (sub-)model, we may declare it several times and each time we may connect its I/O with
# the I/O specified in an inbound node.
for parent_tensor, local_tensor in zip(inbound_node.input_tensors, _extract_inbound_nodes(model)[0].input_tensors):
parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
operator = scope.declare_local_operator('identity')
operator.inputs.append(parent_variable)
operator.outputs.append(local_variable)
# Connect the variables declared when parsing the input model and the actual model outputs. inbound_node has the
# actual outputs while the whole graph is declared via the first inbound node of the input model.
for parent_tensor, local_tensor in zip(inbound_node.output_tensors, _extract_inbound_nodes(model)[0].output_tensors):
parent_tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
local_tensor_type = determine_tensor_type(local_tensor, topology.default_batch_size)
parent_variable = parent_scope.get_local_variable_or_declare_one(parent_tensor.name, parent_tensor_type)
local_variable = scope.get_local_variable_or_declare_one(local_tensor.name, local_tensor_type)
operator = scope.declare_local_operator('identity')
operator.inputs.append(local_variable)
operator.outputs.append(parent_variable)
elif isinstance(model, Layer):
if isinstance(model, InputLayer):
return
operator = parent_scope.declare_local_operator(type(model), raw_model=model)
# Simply connect the layer's I/O with variables declared in the parent scope. Note that it may create input
# variables in the parent scope because we only declare output variables in the beginning of _parse_keras(...)
for parent_tensor in inbound_node.input_tensors:
tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
operator.inputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
for parent_tensor in inbound_node.output_tensors:
tensor_type = determine_tensor_type(parent_tensor, topology.default_batch_size)
operator.outputs.append(parent_scope.get_local_variable_or_declare_one(parent_tensor.name, tensor_type))
else:
raise RuntimeError('Unsupported Keras component %s' % type(model))
| 2.421875 | 2 |
src/scenic/core/regions.py | cahartsell/Scenic | 0 | 6681 | """Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException(f'sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
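# random.triangular(0, R, R) has density proportional to r on [0, R], which is exactly
# the radial density needed for an area-uniform sample inside the disc.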
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
xmin, ymin, xmax, ymax = self.polygons.bounds  # shapely bounds are (minx, miny, maxx, maxy)
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
kdtree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
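# Worked example of the affine grid mapping above: with, say, Ax = Ay = 0.5, Bx = 10 and
# By = 20, grid cell (x=2, y=1) maps to the world point (10 + 0.5*2, 20 + 0.5*1) = (11.0, 20.5),
# and pointToGrid inverts this by rounding a world point to the nearest grid cell.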
class IntersectionRegion(Region):
def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
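# A minimal, self-contained sketch of the containment API, assuming the module is run
# directly with scenic installed; the coordinates and the obstacle grid are arbitrary.
if __name__ == '__main__':
    circle = CircularRegion(Vector(0, 0), 5)
    print(circle.containsPoint(Vector(1, 1)))    # True: hypot(1, 1) <= 5
    print(circle.getAABB())                      # ((-5, -5), (5, 5))
    corners = PointSetRegion('corners', [(0, 0), (4, 0), (0, 4), (4, 4)])
    print(corners.containsPoint((4, 4)))         # True: an exact member of the set
    grid = GridRegion('demo', [[0, 1], [0, 0]], 1, 1, 0, 0)
    print(grid.containsPoint((1, 0)))            # False: the nearest grid cell is an obstacle
    print(grid.containsPoint((0, 1)))            # True: the nearest grid cell is free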
| 2.34375 | 2 |
orangery/cli/cutfill.py | mrahnis/orangery | 2 | 6682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import time
import json
import click
import matplotlib.pyplot as plt
import orangery as o
from orangery.cli import defaults, util
from orangery.tools.plotting import get_scale_factor
@click.command(options_metavar='<options>')
@click.argument('file1', nargs=1, type=click.Path(exists=True), metavar='<file_t0>') # help="survey representing the initial condition"
@click.argument('file2', nargs=1, type=click.Path(exists=True), metavar='<file_t1>') # help="survey representing the final condition"
@click.argument('fields', nargs=1, metavar='<fields>') # help="character string identifying the columns"
@click.argument('xs_name', nargs=1, metavar='<name>') # help="name of the cross-section to plot"
@click.option('--codes', 'codes_f', nargs=1, type=click.Path(exists=True), metavar='<codes_file>', help="JSON file representing the usage intent of a set of survey codes")
@click.option('--show/--save', is_flag=True, default=True, help="Show the plot or save to files; --show is the default")
@click.option('--summary/--no-summary', default=True, help="Print summary information; --summary is the default")
@click.option('--units', type=click.Choice(['m','sft','ft']), default='m', help="Unit to show in axis labels")
@click.option('--labels', nargs=2, metavar='<text text>', help="Labels to display in the legend")
@click.option('--exaggeration', metavar='<int>', default=3, help="Vertical exaggeration of plot")
@click.option('--scale', nargs=2, metavar='<float int>', type=click.Tuple([float, int]), default=(10, 300), help="Scale where first argument is units per-inch on the horizontal axis and second argument is output DPI")
@click.option('--close/--no-close', default=True, help="Close the line ends; --close is the default")
@click.option('--reverse', type=click.Choice(['t0','t1','tx']), help="Reverse a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--exclude', nargs=2, type=click.Tuple([str, click.Choice(['t0','t1','tx'])]), multiple=True, metavar='<str choice>', help="Exclude a survey code from a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--overlay', nargs=1, type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True, help="Enables verbose mode")
def cutfill(file1, file2, fields, xs_name, codes_f, show, summary, units, labels, exaggeration, scale, close, reverse, exclude, overlay, verbose):
"""Displays a plot of a repeat survey with cut and fill.
\b
The cutfill subcommand takes four arguments:
<file_t0> : survey data representing the initial condition in csv format
<file_t1> : survey data representing the final condition in csv format
<fields> : series of characters describing the data columns
<name> : name of cross-section to plot
Options allow setting various properties of the plot. The default is to --show the plot.
With the --save option the plot will be saved as an image along with a csv file containing
data about cross-sectional cut-and-fill areas along the line of section.
\b
Example:
orangery cutfill file_2004.csv file_2010.csv pxyzctr XS-7 --reverse t0
"""
if verbose is True:
loglevel = 2
else:
loglevel = 0
logging.basicConfig(stream=sys.stderr, level=loglevel or logging.INFO)
# load the configuration
codes = defaults.codes.copy()
if codes_f:
user_codes = util.load_config(codes_f)
codes.update(user_codes)
# load the survey data
s1 = o.Survey(file1, fields, codes, 0)
s2 = o.Survey(file2, fields, codes, 0)
if overlay:
s3 = o.Survey(overlay, fields, codes, 0)
exclude_t0 = []
exclude_t1 = []
for code in exclude:
if code[1] in ('t0', 'tx'):
exclude_t0.append(code[0])
if code[1] in ('t1', 'tx'):
exclude_t1.append(code[0])
# select a group of points, in this case a cross section
xs_pts1 = o.group(s1.data, s1.code_table, group=xs_name, exclude=exclude_t0)
xs_pts2 = o.group(s2.data, s2.code_table, group=xs_name, exclude=exclude_t1)
# xs_pts_overlay = o.group(s3.data, s3.code_table, group=xs_name)
# get the endpoints of the group
p1, p2 = o.endpoints(xs_pts1, reverse=reverse in ('t0','tx'))
# make the sections
xs1 = o.Section(xs_pts1, p1, p2, reverse=reverse in ('t0','tx'))
xs2 = o.Section(xs_pts2, p1, p2, reverse=reverse in ('t1','tx'))
# xs_overlay = o.Section(xs_pts_overlay, p1, p2)
if labels:
label_t0 = labels[0]
label_t1 = labels[1]
        # label_overlay = labels[2]  # overlay labelling disabled; --labels only accepts two values
elif 't' in fields:
label_t0 = (xs1.data.iloc[0]['t']).split('T')[0]
label_t1 = (xs2.data.iloc[0]['t']).split('T')[0]
# label_overlay = (xs_overlay.data.iloc[0]['t']).split('T')[0]
else:
label_t0 = 't0'
label_t1 = 't1'
# label_overlay = 'pre-restoration'
# calculate the change
chg = o.Change(xs1, xs2, close_ends=close)
if summary:
chg.summarize()
import matplotlib
    font = {'family': 'sans-serif', 'weight': 'normal', 'size': 16}
matplotlib.rc('font', **font)
# plot the change between two cross-sections
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(exaggeration)
# xs_overlay.plot(ax=ax, marker='None', linestyle='-', linewidth=3, color='tab:red', label=label_overlay)
xs1.plot(ax=ax, marker='o', markersize=4, markerfacecolor='white', markeredgecolor='black', linestyle='-', color='gray', label=label_t0)
xs2.plot(ax=ax, marker='o', markersize=4, markerfacecolor='black', markeredgecolor='black', linestyle='-', color='black', label=label_t1)
chg.polygon_plot(ax=ax, fill_label='Fill', cut_label='Cut')
chg.annotate_plot(ax=ax)
ax.set_xlabel('Distance ({0})'.format(units))
ax.set_ylabel('Elevation ({0}), {1}x exaggeration'.format(units, exaggeration))
plt.legend(loc='best')
plt.title('Cross-section {0}'.format(xs_name))
if show:
plt.show()
else:
fname = xs_name + '-' + label_t0.replace('-', '') + '-' + label_t1.replace('-', '')
scale_factor = get_scale_factor(fig, ax, scale[0])
dims = fig.get_size_inches()
fig.set_size_inches(dims[0]*scale_factor, dims[1]*scale_factor)
fig.savefig(fname+'.png', dpi=scale[1])
click.echo('Figure saved to: {}'.format(fname+'.png'))
chg.save(fname+'.csv')
click.echo('Data saved to: {}'.format(fname+'.csv'))
| 2.484375 | 2 |
src/ice_g2p/dictionaries.py | cadia-lvl/ice-g2p | 0 | 6683 | import os, sys
DICTIONARY_FILE = os.path.join(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')
HEAD_FILE = os.path.join(sys.prefix, 'data/head_map.csv')
MODIFIER_FILE = os.path.join(sys.prefix, 'data/modifier_map.csv')
VOWELS_FILE = os.path.join(sys.prefix, 'data/vowels_sampa.txt')
CONS_CLUSTERS_FILE = os.path.join(sys.prefix, 'data/cons_clusters_sampa.txt')
def read_map(filename):
with open(filename) as f:
file_content = f.read().splitlines()
dict_map = {}
for line in file_content:
arr = line.split('\t')
if len(arr) > 1:
values = arr[1:]
else:
values = []
key = arr[0]
dict_map[key] = values
return dict_map
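# Illustrative sketch (not from the original source): read_map expects a tab-separated
# file whose first column is the key and whose remaining columns are its values.
# A hypothetical file containing
#   keyA<TAB>x<TAB>y
#   keyB
# would make read_map return {'keyA': ['x', 'y'], 'keyB': []}.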
def read_dictionary(filename):
with open(filename) as f:
file_content = f.read().splitlines()
pronDict = {}
for line in file_content:
word, transcr = line.split('\t')
pronDict[word] = transcr
return pronDict
def read_list(filename):
with open(filename) as f:
file_content = f.read().splitlines()
return file_content
def get_head_map():
return read_map(HEAD_FILE)
def get_modifier_map():
return read_map(MODIFIER_FILE)
def get_dictionary():
return read_dictionary(DICTIONARY_FILE)
def get_vowels():
return read_list(VOWELS_FILE)
def get_cons_clusters():
return read_list(CONS_CLUSTERS_FILE)
| 3.109375 | 3 |
tests/test_annotations_notebook.py | jeromedockes/pylabelbuddy | 0 | 6684 | from pylabelbuddy import _annotations_notebook
def test_annotations_notebook(root, annotations_mock, dataset_mock):
nb = _annotations_notebook.AnnotationsNotebook(
root, annotations_mock, dataset_mock
)
nb.change_database()
assert nb.notebook.index(nb.notebook.select()) == 2
nb.go_to_annotations()
assert nb.notebook.index(nb.notebook.select()) == 0
| 1.804688 | 2 |
py/solns/wordSearch/wordSearch.py | zcemycl/algoTest | 1 | 6685 | <reponame>zcemycl/algoTest
class Solution:
@staticmethod
def naive(board,word):
rows,cols,n = len(board),len(board[0]),len(word)
visited = set()
def dfs(i,j,k):
idf = str(i)+','+str(j)
if i<0 or j<0 or i>cols-1 or j>rows-1 or \
board[j][i]!=word[k] or idf in visited:
return False
if k==n-1 and word[k]==board[j][i]:
return True
            visited.add(idf)
            # explore the four neighbours, then un-mark the cell so paths that
            # start elsewhere can still use it (backtracking)
            found = dfs(i+1,j,k+1) or dfs(i-1,j,k+1) or \
                dfs(i,j+1,k+1) or dfs(i,j-1,k+1)
            visited.discard(idf)
            return found
for j in range(rows):
for i in range(cols):
if board[j][i]==word[0]:
if dfs(i,j,0): return True
return False
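    # Usage sketch (hypothetical board, not part of the original file); both methods
    # are expected to return the same result:
    #   board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
    #   Solution.naive(board, "ABCCED")  # -> True
    #   Solution.quick(board, "SEE")     # -> True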
@staticmethod
def quick(board,word):
        ''' Improvements over `naive`:
        1. Drop the extra set of visited coordinates and mark cells in-place with '#'.
        2. No indexing into the original word (pass the remaining suffix instead).
        3. Exit early as soon as one of the four directions succeeds.
        '''
rows,cols,n = len(board),len(board[0]),len(word)
def dfs(i,j,remain):
if len(remain)==0: return True
if i<0 or j<0 or i>cols-1 or j>rows-1 or \
board[j][i]!=remain[0]: return False
board[j][i]="#"
ret = False
for rowOff,colOff in [(1,0),(-1,0),(0,1),(0,-1)]:
ret = dfs(i+colOff,j+rowOff,remain[1:])
if ret: break
board[j][i]=remain[0]
return ret
for j in range(rows):
for i in range(cols):
if board[j][i]==word[0]:
if dfs(i,j,word): return True
return False | 3.46875 | 3 |
middleware/run.py | natedogg484/react-flask-authentication | 0 | 6686 | <reponame>natedogg484/react-flask-authentication
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'some-secret-string'
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
db = SQLAlchemy(app)
jwt = JWTManager(app)
@app.before_first_request
def create_tables():
db.create_all()
import models, resources, views
api.add_resource(resources.UserRegistration, '/registration')
api.add_resource(resources.UserLogin, '/login')
api.add_resource(resources.UserLogoutAccess, '/logout/access')
api.add_resource(resources.UserLogoutRefresh, '/logout/refresh')
api.add_resource(resources.TokenRefresh, '/token/refresh')
api.add_resource(resources.AllUsers, '/users')
api.add_resource(resources.SecretResource, '/secret')
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return models.RevokedTokenModel.is_jti_blacklisted(jti) | 2.71875 | 3 |
Programas do Curso/Desafio 2.py | carvalhopedro22/Programas-em-python-cursos-e-geral- | 0 | 6687 | <filename>Programas do Curso/Desafio 2.py
nome = input('Qual o seu nome? ')
dia = input('Que dia do mês você nasceu? ')
mes = input('Qual o mês em que você nasceu? ')
ano = input('Qual o ano em que você nasceu? ')
print(nome, 'nasceu em', dia,'de',mes,'do ano',ano) | 4.15625 | 4 |
cmibs/cisco_vlan_membership_mib.py | prorevizor/noc | 84 | 6688 | # ----------------------------------------------------------------------
# CISCO-VLAN-MEMBERSHIP-MIB
# Compiled MIB
# Do not modify this file directly
# Run ./noc mib make-cmib instead
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# MIB Name
NAME = "CISCO-VLAN-MEMBERSHIP-MIB"
# Metadata
LAST_UPDATED = "2007-12-14"
COMPILED = "2020-01-19"
# MIB Data: name -> oid
MIB = {
    "CISCO-VLAN-MEMBERSHIP-MIB::ciscoVlanMembershipMIB": "1.3.6.1.4.1.9.9.68",
    "CISCO-VLAN-MEMBERSHIP-MIB::ciscoVlanMembershipMIBObjects": "1.3.6.1.4.1.9.9.68.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmps": "1.3.6.1.4.1.9.9.68.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsVQPVersion": "1.3.6.1.4.1.9.9.68.1.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsRetries": "1.3.6.1.4.1.9.9.68.1.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirmInterval": "1.3.6.1.4.1.9.9.68.1.1.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirm": "1.3.6.1.4.1.9.9.68.1.1.4",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsReconfirmResult": "1.3.6.1.4.1.9.9.68.1.1.5",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsCurrent": "1.3.6.1.4.1.9.9.68.1.1.6",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsTable": "1.3.6.1.4.1.9.9.68.1.1.7",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsEntry": "1.3.6.1.4.1.9.9.68.1.1.7.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsIpAddress": "1.3.6.1.4.1.9.9.68.1.1.7.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsPrimary": "1.3.6.1.4.1.9.9.68.1.1.7.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsRowStatus": "1.3.6.1.4.1.9.9.68.1.1.7.1.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembership": "1.3.6.1.4.1.9.9.68.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryTable": "1.3.6.1.4.1.9.9.68.1.2.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryEntry": "1.3.6.1.4.1.9.9.68.1.2.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryVlanIndex": "1.3.6.1.4.1.9.9.68.1.2.1.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryMemberPorts": "1.3.6.1.4.1.9.9.68.1.2.1.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryMember2kPorts": "1.3.6.1.4.1.9.9.68.1.2.1.1.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipTable": "1.3.6.1.4.1.9.9.68.1.2.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipEntry": "1.3.6.1.4.1.9.9.68.1.2.2.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlanType": "1.3.6.1.4.1.9.9.68.1.2.2.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlan": "1.3.6.1.4.1.9.9.68.1.2.2.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmPortStatus": "1.3.6.1.4.1.9.9.68.1.2.2.1.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlans": "1.3.6.1.4.1.9.9.68.1.2.2.1.4",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlans2k": "1.3.6.1.4.1.9.9.68.1.2.2.1.5",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlans3k": "1.3.6.1.4.1.9.9.68.1.2.2.1.6",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlans4k": "1.3.6.1.4.1.9.9.68.1.2.2.1.7",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtTable": "1.3.6.1.4.1.9.9.68.1.2.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtEntry": "1.3.6.1.4.1.9.9.68.1.2.3.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipPortRangeIndex": "1.3.6.1.4.1.9.9.68.1.2.3.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMembershipSummaryExtPorts": "1.3.6.1.4.1.9.9.68.1.2.3.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVlanCreationMode": "1.3.6.1.4.1.9.9.68.1.2.4",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmStatistics": "1.3.6.1.4.1.9.9.68.1.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPQueries": "1.3.6.1.4.1.9.9.68.1.3.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPResponses": "1.3.6.1.4.1.9.9.68.1.3.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsChanges": "1.3.6.1.4.1.9.9.68.1.3.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPShutdown": "1.3.6.1.4.1.9.9.68.1.3.4",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPDenied": "1.3.6.1.4.1.9.9.68.1.3.5",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPWrongDomain": "1.3.6.1.4.1.9.9.68.1.3.6",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVQPWrongVersion": "1.3.6.1.4.1.9.9.68.1.3.7",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmInsufficientResources": "1.3.6.1.4.1.9.9.68.1.3.8",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmStatus": "1.3.6.1.4.1.9.9.68.1.4",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmNotificationsEnabled": "1.3.6.1.4.1.9.9.68.1.4.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlan": "1.3.6.1.4.1.9.9.68.1.5",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanTable": "1.3.6.1.4.1.9.9.68.1.5.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanEntry": "1.3.6.1.4.1.9.9.68.1.5.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanId": "1.3.6.1.4.1.9.9.68.1.5.1.1.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVoiceVlanCdpVerifyEnable": "1.3.6.1.4.1.9.9.68.1.5.1.1.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmNotifications": "1.3.6.1.4.1.9.9.68.2",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmNotificationsPrefix": "1.3.6.1.4.1.9.9.68.2.0",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmVmpsChange": "1.3.6.1.4.1.9.9.68.2.0.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMIBConformance": "1.3.6.1.4.1.9.9.68.3",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMIBCompliances": "1.3.6.1.4.1.9.9.68.3.1",
    "CISCO-VLAN-MEMBERSHIP-MIB::vmMIBGroups": "1.3.6.1.4.1.9.9.68.3.2",
}
DISPLAY_HINTS = {}
| 1.507813 | 2 |
harbor/tests/test_unit.py | tdimnet/integrations-core | 663 | 6689 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock import MagicMock
from requests import HTTPError
from datadog_checks.base import AgentCheck
from datadog_checks.dev.http import MockResponse
from .common import HARBOR_COMPONENTS, HARBOR_VERSION, VERSION_1_5, VERSION_1_6, VERSION_1_8
@pytest.mark.usefixtures("patch_requests")
def test_check_health(aggregator, harbor_check, harbor_api):
base_tags = ['tag1:val1', 'tag2']
harbor_check._check_health(harbor_api, base_tags)
if harbor_api.harbor_version >= VERSION_1_8:
components = HARBOR_COMPONENTS
for c in components:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:{}'.format(c)])
elif harbor_api.harbor_version >= VERSION_1_6:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:chartmuseum'])
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
elif harbor_api.harbor_version >= VERSION_1_5:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
else:
aggregator.assert_service_check('harbor.status', AgentCheck.UNKNOWN, tags=base_tags)
@pytest.mark.usefixtures("patch_requests")
def test_check_registries_health(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._check_registries_health(harbor_api, tags)
tags.append('registry:demo')
aggregator.assert_service_check('harbor.registry.status', AgentCheck.OK, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_project_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_project_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.projects.count', 2, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_disk_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_disk_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.disk.free', 5e5, tags=tags)
aggregator.assert_metric('harbor.disk.total', 1e6, tags=tags)
@pytest.mark.usefixtures("patch_requests")
@pytest.mark.skipif(HARBOR_VERSION < VERSION_1_5, reason="The registry.read_only metric is submitted for Harbor 1.5+")
def test_submit_read_only_status(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_read_only_status(harbor_api, tags)
aggregator.assert_metric('harbor.registry.read_only', 0, tags=tags)
def test_api__make_get_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_get_request('{base_url}/api/path') == {"json": True}
harbor_api.http.get = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_get_request('{base_url}/api/path')
def test_api__make_paginated_get_request(harbor_api):
expected_result = [{'item': i} for i in range(20)]
paginated_result = [[expected_result[i], expected_result[i + 1]] for i in range(0, len(expected_result) - 1, 2)]
values = []
for r in paginated_result:
values.append(MockResponse(json_data=r, headers={'link': 'Link: <unused_url>; rel=next; type="text/plain"'}))
values[-1].headers.pop('link')
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(side_effect=values)
assert harbor_api._make_paginated_get_request('{base_url}/api/path') == expected_result
def test_api__make_post_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.post = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_post_request('{base_url}/api/path') == {"json": True}
harbor_api.http.post = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_post_request('{base_url}/api/path')
| 2 | 2 |
M-SPRING/template/adapter.py | CN-UPB/SPRING | 3 | 6690 | # module for adapting templates on the fly if components are reused
# check that all reused components are defined consistently -> else: exception
def check_consistency(components):
for j1 in components:
for j2 in components: # compare all components
if j1 == j2 and j1.__dict__ != j2.__dict__: # same name and reuseID but different other attributes
raise ValueError("Inconsistent definition of reused component {}.".format(j1))
# check and return number of reuses
def reuses(component, arcs):
# count number of reuses for each port
times = set() # set => no duplicates
for k in range(component.inputs):
times.add(len([a for a in arcs if a.ends_in(k, component)]))
for k in range(component.outputs):
times.add(len([a for a in arcs if a.starts_at(k, component)]))
# check if each port was reused the same number of times (requirement/assumption)
if len(times) != 1:
raise ValueError("Not all ports of {} are (re-)used the same number of times (required).".format(component))
return times.pop()
# return adapted templates with adapted reused components and exactly one arc per port (allows proportional output)
def adapt_for_reuse(templates):
# create set of components and arcs
arcs = []
for t in templates:
arcs += t.arcs
# find reused components and adapt them
component_reuses = {} # dictionary with components-#reuses
reused_components = [] # list of all reused components (contains duplicates) for consistency check
for t in templates:
for j in t.components:
uses = reuses(j, arcs)
if uses > 1: # used by >1 => reuse
if j.source:
raise ValueError("Source component {} cannot be reused".format(j))
j.adapt(uses) # add ports and functions on the fly
component_reuses[j] = uses
reused_components.append(j)
check_consistency(reused_components) # check consistent def of reused components
# adjust arcs to use new ports
for j in component_reuses:
uses = component_reuses[j]
port_offset = 0
for t in templates:
# adjust/shift ingoing arcs by offset to correct port
arc_shifted = False
for a in t.arcs:
if a.dest == j:
a.dest_in += port_offset
arc_shifted = True
if a.source == j:
a.src_out += port_offset
arc_shifted = True
# increase the offset for the next template if an arc was shifted
if arc_shifted:
if port_offset >= uses: # arc was shifted too often: something went wrong
raise ValueError("Port offset {} too high. Should be < {} (#reuses).".format(port_offset, uses))
port_offset += 1
return templates
| 2.734375 | 3 |
column_completer.py | AllanLRH/column_completer | 0 | 6691 | <reponame>AllanLRH/column_completer
class ColumnCompleter(object):
"""Complete Pandas DataFrame column names"""
def __init__(self, df, space_filler='_', silence_warnings=False):
"""
Once instantiated with a Pandas DataFrame, it will expose the column
        names as attributes which map to their string counterparts.
        Autocompletion is supported.
        Spaces in the column names are by default replaced with underscores, though the
        attributes still map to the original column names; the replacement is necessary
        to conform to valid Python syntax.
Parameters
----------
df : pd.DataFrame
DataFrame whose column names to expose.
space_filler : str, optional
            String to replace spaces in column names, by default '_'.
silence_warnings : bool, optional
            Set to True to disable warnings concerning column names which start or end
            with spaces, which are hard to detect by visual inspection, by default False.
"""
super(ColumnCompleter, self).__init__()
# We copy the columns to avoid keeping old references to a DataFrame which
# would otherwise be garbage collected.
self.columns = df.columns.copy()
self.space_filler = space_filler
self.silence_warnings = silence_warnings
if not self.silence_warnings:
self._warn_about_column_names_edge_spaces()
self._set_columns()
def _warn_about_column_names_edge_spaces(self):
if not hasattr(self.columns, 'str'): # the column names are not strings
return None
if self.columns.str.startswith(' ').any():
            raise Warning("The following columns start with one or more spaces: " +
self.columns[self.columns.str.startswith(' ')])
if self.columns.str.endswith(' ').any():
            raise Warning("The following columns end with one or more spaces: " +
self.columns[self.columns.str.endswith(' ')])
def _set_columns(self):
if not hasattr(self.columns, 'str'): # the column names are not strings
self.mapping = {col: col for col in self.columns}
elif self.space_filler is None:
self.mapping = {col: col for col in self.columns if ' ' not in col}
else:
self.mapping = {col.replace(
' ', self.space_filler): col for col in self.columns}
if len(self.mapping) < len(self.columns):
                raise ValueError("Using {} as a replacement for".format(repr(self.space_filler)) +
                                 " spaces causes a collision of column names, please choose another.")
self.keys = self.mapping.keys()
if len(self.keys) < len(self.columns) and not self.silence_warnings:
raise Warning("Without a space_filler specified, you're only able to autocomplete " +
"{} of {} column names.".format(len(self.keys), len(self.columns)))
@staticmethod
def replace_df_column_spaces(df, rep, capatilize_first_letter=False):
"""
Return a DataFrame with the spaces in the column names replaced with a custom string.
Parameters
----------
df : pd.DataFrame
            DataFrame whose columns to rename.
rep : str
String to replace spaces with.
capatilize_first_letter : bool, optional
            If True, the first letter of the renamed columns will be capitalized, by default False.
Returns
-------
pd.DataFrame
DataFrame with renamed columns.
Raises
------
ValueError
If the renaming of the columns causes one or more column names to be identical.
"""
rename_dict = {col: col.replace(' ', rep) for col in df.columns}
if len(set(rename_dict.values())) < len(df.columns.unique()):
raise ValueError("Renaming the columns in such a way would cause a " +
"collision of column names.")
if capatilize_first_letter:
rename_dict = {k: v[0].upper() + v[1:]
for k, v in rename_dict.items()}
return df.rename(columns=rename_dict)
def __dir__(self):
return self.keys
def __getattr__(self, key):
return self.mapping[key]
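# Minimal usage sketch (hypothetical DataFrame; assumes pandas is installed):
#   import pandas as pd
#   df = pd.DataFrame({"first column": [1, 2], "second column": [3, 4]})
#   col = ColumnCompleter(df)
#   df[col.first_column]  # equivalent to df["first column"], with tab-completion on `col`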
| 3.265625 | 3 |
source/vsm-dashboard/vsm_dashboard/test/test_data/swift_data.py | ramkrsna/virtual-storage-manager | 172 | 6692 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm_dashboard.api import swift
from .utils import TestDataContainer
def data(TEST):
TEST.containers = TestDataContainer()
TEST.objects = TestDataContainer()
container_1 = swift.Container(dict(name=u"container_one\u6346"))
container_2 = swift.Container(dict(name=u"container_two\u6346"))
TEST.containers.add(container_1, container_2)
object_dict = {"name": u"test_object\u6346",
"content_type": u"text/plain",
"bytes": 128,
"last_modified": None,
"hash": u"object_hash"}
obj_dicts = [object_dict]
obj_data = "Fake Data"
for obj_dict in obj_dicts:
swift_object = swift.StorageObject(obj_dict,
container_1.name,
data=obj_data)
TEST.objects.add(swift_object)
| 1.734375 | 2 |
cinder/backup/driver.py | liangintel/stx-cinder | 0 | 6693 | # Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import keymgr as key_manager
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db=None):
super(BackupMetadataAPI, self).__init__(db)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info("Value with type=%s is not serializable",
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# Copy the encryption key UUID for backup
                if key == 'encryption_key_id' and value is not None:
km = key_manager.API(CONF)
value = km.store(self.context, km.get(self.context, value))
LOG.debug("Copying encryption key UUID for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields, excludes=None):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
:param metadata: master set of metadata
:param fields: list of fields we want to extract
:param excludes: fields to be excluded
:returns: filtered metadata
"""
if not fields:
return metadata
if not excludes:
excludes = []
subset = {}
for field in fields:
if field in metadata and field not in excludes:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
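    # For example (illustrative values, not from the original source):
    #   _filter({'a': 1, 'b': 2}, ['a', 'b'], excludes=['b']) returns {'a': 1},
    #   while an empty fields list returns the metadata unchanged.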
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
excludes = []
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
# NOTE(dosaboy): if the target volume looks like it was auto-created
# as part of this restore operation and we have a name to restore
# then apply the name to the target volume. However, if that target
# volume already existed and it has a name or we do not have a name to
# restore, then ignore this key. This is intended to be a less drastic
# solution than commit 7ee80f7.
key = 'display_name'
if key in fields and key in metadata:
target_vol = self.db.volume_get(self.context, volume_id)
name = target_vol.get(key, '')
if (not metadata.get(key) or name and
not name.startswith('restore_backup_')):
excludes.append(key)
excludes.append('display_description')
metadata = self._filter(metadata, fields, excludes=excludes)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description', 'encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for type in factory:
func = factory[type][0]
fields = factory[type][1]
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db=None):
super(BackupDriver, self).__init__(db)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation, the volume_file parameter will be a proxy that executes all
        methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup.
        Some I/O operations may block greenthreads, so in order to prevent
        starvation, the volume_file parameter will be a proxy that executes all
        methods in native threads, so the method implementation doesn't need to
        worry about that.
"""
return
@abc.abstractmethod
def delete_backup(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
and return it here as a dictionary so it can be serialized into a
string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
return {}
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
since it will be called with the extra information that was provided by
export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises InvalidBackup, NotImplementedError:
"""
return
| 1.570313 | 2 |
__init__.py | ENDERZOMBI102/chained | 0 | 6694 | <reponame>ENDERZOMBI102/chained<filename>__init__.py<gh_stars>0
from .chainOpen import chainOpen
__all__ = [
'chainOpen'
] | 1.03125 | 1 |
code/reasoningtool/tests/QuerySciGraphTests.py | andrewsu/RTX | 31 | 6695 | import unittest
from QuerySciGraph import QuerySciGraph
class QuerySciGraphTestCase(unittest.TestCase):
def test_get_disont_ids_for_mesh_id(self):
disont_ids = QuerySciGraph.get_disont_ids_for_mesh_id('MESH:D005199')
known_ids = {'DOID:13636'}
self.assertSetEqual(disont_ids, known_ids)
def test_query_sub_phenotypes_for_phenotype(self):
sub_phenotypes = QuerySciGraph.query_sub_phenotypes_for_phenotype("HP:0000107") # Renal cyst
known_phenotypes = {'HP:0100877': 'Renal diverticulum',
'HP:0000108': 'Renal corticomedullary cysts',
'HP:0000803': 'Renal cortical cysts',
'HP:0000003': 'Multicystic kidney dysplasia',
'HP:0008659': 'Multiple small medullary renal cysts',
'HP:0005562': 'Multiple renal cysts',
'HP:0000800': 'Cystic renal dysplasia',
'HP:0012581': 'Solitary renal cyst'}
self.assertDictEqual(sub_phenotypes, known_phenotypes)
if __name__ == '__main__':
unittest.main()
| 2.765625 | 3 |
ledis/cli.py | gianghta/Ledis | 0 | 6696 | from typing import Any
from ledis import Ledis
from ledis.exceptions import InvalidUsage
class CLI:
__slots__ = {"ledis", "commands"}
def __init__(self):
self.ledis = Ledis()
self.commands = {
"set": self.ledis.set,
"get": self.ledis.get,
"sadd": self.ledis.sadd,
"srem": self.ledis.srem,
"smembers": self.ledis.smembers,
"sinter": self.ledis.sinter,
"keys": self.ledis.keys,
"del": self.ledis.delete,
"expire": self.ledis.expire,
"ttl": self.ledis.ttl,
"save": self.ledis.save,
"restore": self.ledis.restore,
}
def call(self, query: str) -> Any:
if " " in query:
command, data = query.split(" ", 1)
data = data.split()
else:
command = query
data = []
if command.lower() not in self.commands:
allowed_commands = ", ".join(key.upper() for key in self.commands)
raise InvalidUsage(
f"Command '{command}' is invalid. "
f"Allowed commands are {allowed_commands}."
)
try:
return self.commands[command.lower()](*data)
except TypeError:
raise InvalidUsage("Invalid command format")
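# Usage sketch (hypothetical key/value; assumes the Ledis backend implements these commands):
#   cli = CLI()
#   cli.call("SET greeting hello")
#   cli.call("GET greeting")  # expected to return "hello"
#   cli.call("KEYS")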
| 2.84375 | 3 |
ClosedLoopTF.py | nazhanshaberi/miniature-octo-barnacle | 0 | 6697 | #group 1: Question 1(b)
# A control system for positioning the head of a laser printer has the closed loop transfer function:
# !pip install control
import matplotlib.pyplot as plt
import control
a=10 #Value for a
b=50 #value for b
sys1 = control.tf(20*b,[1,20+a,b+20*a,20*b])
print('3rd order system transfer function T1(s)=',sys1)
sys2=control.tf(b,[1,a,b])
print('2nd order system transfer function T2(s)=',sys2)
value = sys1.pole()
list_of_poles = [pole.round(2) for pole in value]
print('poles',list_of_poles)
y1=control.step_response(sys1)
y2=control.step_response(sys2)
plt.plot(y1[0],y1[1],'r--', label='3rd order actual system')
plt.plot(y2[0],y2[1],'g', label='2nd order approximation system')
plt.legend()
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('step response y(t)')
plt.title('step response comparison of 3rd and 2nd order system')
plt.show()
| 3.734375 | 4 |
example_project/test_messages/bbcode_tags.py | bastiedotorg/django-precise-bbcode | 30 | 6698 | import re
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
color_re = re.compile(r'^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')
class SubTag(BBCodeTag):
name = 'sub'
def render(self, value, option=None, parent=None):
return '<sub>%s</sub>' % value
class PreTag(BBCodeTag):
name = 'pre'
render_embedded = False
def render(self, value, option=None, parent=None):
return '<pre>%s</pre>' % value
class SizeTag(BBCodeTag):
name = 'size'
definition_string = '[size={RANGE=4,7}]{TEXT}[/size]'
format_string = '<span style="font-size:{RANGE=4,7}px;">{TEXT}</span>'
class FruitTag(BBCodeTag):
name = 'fruit'
definition_string = '[fruit]{CHOICE=tomato,orange,apple}[/fruit]'
format_string = '<h5>{CHOICE=tomato,orange,apple}</h5>'
class PhoneLinkTag(BBCodeTag):
name = 'phone'
definition_string = '[phone]{PHONENUMBER}[/phone]'
format_string = '<a href="tel:{PHONENUMBER}">{PHONENUMBER}</a>'
def render(self, value, option=None, parent=None):
href = 'tel:{}'.format(value)
        return '<a href="{0}">{1}</a>'.format(href, value)
class StartsWithATag(BBCodeTag):
name = 'startswitha'
definition_string = '[startswitha]{STARTSWITH=a}[/startswitha]'
format_string = '<span>{STARTSWITH=a}</span>'
class RoundedBBCodeTag(BBCodeTag):
name = 'rounded'
class Options:
strip = False
def render(self, value, option=None, parent=None):
if option and re.search(color_re, option) is not None:
return '<div class="rounded" style="border-color:{};">{}</div>'.format(option, value)
return '<div class="rounded">{}</div>'.format(value)
tag_pool.register_tag(SubTag)
tag_pool.register_tag(PreTag)
tag_pool.register_tag(SizeTag)
tag_pool.register_tag(FruitTag)
tag_pool.register_tag(PhoneLinkTag)
tag_pool.register_tag(StartsWithATag)
tag_pool.register_tag(RoundedBBCodeTag)
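# Rendering sketch (illustrative, not part of the original file): once these tags are
# registered, precise_bbcode should turn "[sub]text[/sub]" into "<sub>text</sub>" and
# "[fruit]apple[/fruit]" into "<h5>apple</h5>".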
| 2.53125 | 3 |
tests/test_vmtkScripts/test_vmtksurfacescaling.py | ramtingh/vmtk | 0 | 6699 | ## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacescaling as scaling
def test_isotropic_scale(aorta_surface, compare_surfaces):
name = __name__ + '_test_isotropic_scale.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactor = 2
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
@pytest.mark.parametrize('xfactor,yfactor,zfactor,paramid', [
(2, None, None, '0'),
(None, 2, None, '1'),
(None, None, 2, '2'),
(2, 2, None, '3'),
(2, None, 2, '4'),
(None, 2, 2, '5'),
])
def test_xyz_scale_factors(aorta_surface, compare_surfaces, xfactor,
yfactor, zfactor, paramid):
name = __name__ + '_test_xyz_scale_factors_' + paramid + '.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactorX = xfactor
scaler.ScaleFactorY = yfactor
scaler.ScaleFactorZ = zfactor
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
| 2 | 2 |