max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
simba/ROI_multiply.py | KonradDanielewski/simba | 172 | 11041 | <filename>simba/ROI_multiply.py
import glob
import pandas as pd
from configparser import ConfigParser
import os
from simba.drop_bp_cords import *
def multiplyFreeHand(inifile, currVid):
_, CurrVidName, ext = get_fn_ext(currVid)
config = ConfigParser()
configFile = str(inifile)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
videoPath = os.path.join(projectPath, 'videos')
ROIcoordinatesPath = os.path.join(projectPath, 'logs', 'measures', 'ROI_definitions.h5')
try:
rectanglesInfo = pd.read_hdf(ROIcoordinatesPath, key='rectangles')
circleInfo = pd.read_hdf(ROIcoordinatesPath, key='circleDf')
polygonInfo = pd.read_hdf(ROIcoordinatesPath, key='polygons')
rectangularDf = rectanglesInfo.loc[rectanglesInfo['Video'] == str(CurrVidName)]
circleDf = circleInfo.loc[circleInfo['Video'] == str(CurrVidName)]
polygonDf = polygonInfo.loc[polygonInfo['Video'] == str(CurrVidName)]
ROIdefExist = True
except FileNotFoundError:
ROIdefExist = False
print('Cannot apply to all: no ROI definitions exist')
if ROIdefExist is True:
if (len(rectangularDf) == 0 and len(circleDf) == 0 and len(polygonDf) == 0):
print('Cannot apply ROIs to all: no records exist for ' + str(CurrVidName))
else:
videofilesFound = glob.glob(videoPath + '/*.mp4') + glob.glob(videoPath + '/*.avi')
duplicatedRec, duplicatedCirc, duplicatedPoly = (rectangularDf.copy(), circleDf.copy(), polygonDf.copy())
for vids in videofilesFound:
_, CurrVidName, ext = get_fn_ext(vids)
duplicatedRec['Video'], duplicatedCirc['Video'], duplicatedPoly['Video'] = (CurrVidName, CurrVidName, CurrVidName)
rectangularDf = rectangularDf.append(duplicatedRec, ignore_index=True)
circleDf = circleDf.append(duplicatedCirc, ignore_index=True)
polygonDf = polygonDf.append(duplicatedPoly, ignore_index=True)
rectangularDf = rectangularDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
circleDf = circleDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
polygonDf = polygonDf.drop_duplicates(subset=['Video', 'Name'], keep="first")
store = pd.HDFStore(ROIcoordinatesPath, mode='w')
store['rectangles'] = rectangularDf
store['circleDf'] = circleDf
store['polygons'] = polygonDf
store.close()
print('ROI(s) for ' + CurrVidName + ' applied to all videos')
print('Next, click on "draw" to modify ROI location(s) or click on "reset" to remove ROI drawing(s)')
|
paas-ce/paas/esb/lib/redis_rate_limit/ratelimit.py | renmcc/bk-PaaS | 767 | 11077 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
"""A distributed rate limiter rely on redis
based on `token bucket <https://en.wikipedia.org/wiki/Token_bucket>` algorithm
Usage
~~~~~
.. code-block:: python
# Init a redis connection pool
import redis
redisdb = redis.Redis()
rate = RateLimiter(redisdb, identifier='ip=127.0.0.1 path=/get_user_info/')
# Allow 10 requests every 1 minute
# period also accepts seconds/minutes/hours/days as key
rate.add_rule(tokens=10, period={'minute': 1})
# You could add multiple rules for one limiter
# rate.add_rule(tokens=200, period={'hour': 1})
print rate.acquire()
# returns {'allowed': True, 'remaining_tokens': 9.0}
"""
import time
import logging
from redis import WatchError
logger = logging.getLogger('root')
class BaseRateLimiter(object):
def __init__(self, redisdb, identifier, namespace='', tokens=None, period=None):
"""Init a RateLimiter class
:param redisdb: a `redis.Redis` instance
:param str identifier: identifier for the limiter, such as a user_id, etc.
:param str namespace: namespace for redis keys
:param int tokens: maximum tokens for one time period
:param dict period: dict, time period, such as {'minutes': 10}
"""
self.redisdb = redisdb
self.identifier = identifier
self.namespace = namespace
self.rules = []
# Add rule
if tokens is not None and period:
self.add_rule(tokens, period)
self.prepare()
def prepare(self):
"""Prepare to work
"""
pass
def add_rule(self, tokens, period):
"""Add multiple rules for this limiter, see `__init__` for parameter details
"""
rule = Rule(tokens, Rule.period_to_seonds(period))
self.rules.append(rule)
def acquire(self, tokens=1):
"""Acquire for a single request
:param int tokens: tokens to consume for this request, default to 1
"""
if not self.rules:
return {'allowed': True, 'remaining_tokens': 0}
logger.debug('Start acquiring tokens by given rules, this operation may have several '
'communications with redis.')
rets = []
for rule in self.rules:
logger.debug('Acquiring by single rule, rule=%s tokens=%s', rule, tokens)
ret = self.acquire_by_single_rule(rule, tokens)
logger.debug('Acquiring finished, result=%s', ret)
if not ret['allowed']:
logger.debug('Acquiring denied by given rule, rule=%s.', rule)
return ret
rets.append(ret)
logger.debug('Acquiring succeeded.')
return {
'allowed': True,
'remaining_tokens': min(x['remaining_tokens'] for x in rets)
}
class RateLimiter(BaseRateLimiter):
"""Rate limiter class
"""
def acquire_by_single_rule(self, rule, tokens=1):
"""Acquire an request quota from limiter
:param rule: `Rule` object
:param int tokens: tokens to be consumed, default 1
:returns: a dict of `allowed` and `remaining_tokens`
- allowed: whether this request is allowed
- remaining_tokens: remaining_tokens for this rule's period
"""
rk_tokens = 'rlim::%s::tokens::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
rk_last_ts = 'rlim::%s::last_ts::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
rule_ttl_seconds = rule.period_seconds + 10
try:
rv_last_ts = float(self.redisdb.get(rk_last_ts))
rv_tokens = float(self.redisdb.get(rk_tokens))
except Exception:
# Initialize values if they do not exist
rv_last_ts = time.time()
rv_tokens = rule.tokens
self.redisdb.set(rk_tokens, rv_tokens, ex=rule_ttl_seconds)
self.redisdb.set(rk_last_ts, '%.3f' % rv_last_ts, ex=rule_ttl_seconds)
# Add fresh tokens since last timestamp
with self.redisdb.pipeline() as pipe:
pipe.watch(rk_last_ts)
# Float precision may cause this value to be negative
# Add tokens for the time that has passed
seconds_passed = max(time.time() - rv_last_ts, 0)
fresh_tokens = rule.fresh_tokens_by_seconds(seconds_passed)
remaining_tokens = rv_tokens
# Only add fresh tokens when the amount is at least 1, because
# the elapsed time may be too short to yield a whole token
if fresh_tokens >= 1 and remaining_tokens < rule.tokens:
# Never let tokens exceed rule.tokens
fresh_tokens = min(fresh_tokens, rule.tokens - remaining_tokens)
pipe.multi()
pipe.incrbyfloat(rk_tokens, fresh_tokens)
pipe.expire(rk_tokens, rule_ttl_seconds)
pipe.set(rk_last_ts, '%.3f' % time.time(), ex=rule_ttl_seconds)
# Ignore WatchError
try:
pipe.execute()
except WatchError:
pass
# Remove tokens; if the tokens to consume exceed the remaining tokens, roll back
# and return False
remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, -tokens)
over_limit = False
if remaining_tokens < 0:
remaining_tokens = self.redisdb.incrbyfloat(rk_tokens, tokens)
over_limit = True
return {
'allowed': not over_limit,
'remaining_tokens': max(remaining_tokens, 0)
}
class SimpleLimiter(BaseRateLimiter):
def prepare(self):
self.simple_incr = self.redisdb.register_script('''\
local current
current = redis.call("incr", KEYS[1])
if tonumber(current) == 1 then
redis.call("expire", KEYS[1], ARGV[1])
end
return current''')
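# Note (comment added for clarity): the Lua script above implements a
# fixed-window counter -- it INCRs the key and, on the first hit of a window,
# sets the key's TTL to the rule period so the count resets once the window
# expires.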
def acquire_by_single_rule(self, rule, tokens=1):
"""Acquire an request quota from limiter
:param rule: `Rule` object
:param int tokens: tokens to be consumed, default 1
:returns: a dict of `allowed` and `remaining_tokens`
- allowed: whether this request is allowed
- remaining_tokens: remaining_tokens for this rule's period
"""
# TODO: Should we use ( current timestamp / period_seconds ) as part of the redis key?
rk_counter = 'rlim::%s::scounter::%s::r%s' % (self.namespace, self.identifier, rule.to_string())
old_cnt = self.redisdb.get(rk_counter)
if old_cnt is not None and int(old_cnt) >= rule.tokens:
return {
'allowed': False,
'remaining_tokens': 0.0
}
new_cnt = self.simple_incr(keys=[rk_counter], args=[rule.period_seconds])
return {
'allowed': True,
'remaining_tokens': max(0, rule.tokens - new_cnt)
}
class Rule(object):
"""Rule class for RateLimiter"""
time_unit_to_seconds = {
'second': 1,
'minute': 60,
'hour': 3600,
'day': 3600 * 24,
}
@classmethod
def period_to_seonds(cls, period):
for unit, seconds in cls.time_unit_to_seconds.items():
if unit in period:
period_seconds = period[unit] * seconds
break
else:
raise ValueError(('Invalid period %s given, should be '
'{"second/minute/hour/day": NUMBER}') % period)
return period_seconds
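# Example (comment added for clarity): Rule.period_to_seonds({'minute': 5})
# returns 5 * 60 == 300; an unrecognized unit raises ValueError.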
def __init__(self, tokens, period_seconds):
self.tokens = tokens
# Period precision is limited to whole seconds
self.period_seconds = int(period_seconds)
if tokens < 0:
logger.warn('Will not allow any acquire because given tokens < 0')
def to_string(self):
return "%s_%s" % (self.tokens, self.period_seconds)
def fresh_tokens_by_seconds(self, seconds):
return int(self.rate_per_seconds * seconds)
@property
def rate_per_seconds(self):
return self.tokens / float(self.period_seconds)
def __repr__(self):
return '<Rule %s>' % self.to_string()
|
plugins/Autocomplete/plugin.py | mogad0n/Limnoria | 476 | 11127 | <reponame>mogad0n/Limnoria
###
# Copyright (c) 2020-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot import conf, ircutils, ircmsgs, callbacks
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization("Autocomplete")
REQUEST_TAG = "+draft/autocomplete-request"
RESPONSE_TAG = "+draft/autocomplete-response"
def _commonPrefix(L):
"""Takes a list of lists, and returns their longest common prefix."""
assert L
if len(L) == 1:
return L[0]
for n in range(1, max(map(len, L)) + 1):
prefix = L[0][:n]
for item in L[1:]:
if prefix != item[:n]:
return prefix[0:-1]
assert False
def _getAutocompleteResponse(irc, msg, payload):
"""Returns the value of the +draft/autocomplete-response tag for the given
+draft/autocomplete-request payload."""
tokens = callbacks.tokenize(
payload, channel=msg.channel, network=irc.network
)
normalized_payload = " ".join(tokens)
candidate_commands = _getCandidates(irc, normalized_payload)
if len(candidate_commands) == 0:
# No result
return None
elif len(candidate_commands) == 1:
# One result, return it directly
commands = candidate_commands
else:
# Multiple results, return only the longest common prefix + one word
tokenized_candidates = [
callbacks.tokenize(c, channel=msg.channel, network=irc.network)
for c in candidate_commands
]
common_prefix = _commonPrefix(tokenized_candidates)
words_after_prefix = {
candidate[len(common_prefix)] for candidate in tokenized_candidates
}
commands = [
" ".join(common_prefix + [word]) for word in words_after_prefix
]
# strip what the user already typed
assert all(command.startswith(normalized_payload) for command in commands)
normalized_payload_length = len(normalized_payload)
response_items = [
command[normalized_payload_length:] for command in commands
]
return "\t".join(sorted(response_items))
def _getCandidates(irc, normalized_payload):
"""Returns a list of commands starting with the normalized_payload."""
candidates = set()
for cb in irc.callbacks:
cb_commands = cb.listCommands()
# copy them with the plugin name (optional when calling a command)
# at the beginning
plugin_name = cb.canonicalName()
cb_commands += [plugin_name + " " + command for command in cb_commands]
candidates |= {
command
for command in cb_commands
if command.startswith(normalized_payload)
}
return candidates
class Autocomplete(callbacks.Plugin):
"""Provides command completion for IRC clients that support it."""
def _enabled(self, irc, msg):
return (
conf.supybot.protocols.irc.experimentalExtensions()
and self.registryValue("enabled", msg.channel, irc.network)
)
def doTagmsg(self, irc, msg):
if REQUEST_TAG not in msg.server_tags:
return
if "msgid" not in msg.server_tags:
return
if not self._enabled(irc, msg):
return
msgid = msg.server_tags["msgid"]
text = msg.server_tags[REQUEST_TAG]
# using callbacks._addressed instead of callbacks.addressed, as
# callbacks.addressed would tag the m
payload = callbacks._addressed(irc, msg, payload=text)
if not payload:
# not addressed
return
# marks used by '_addressed' are usually prefixes (char, string,
# nick), but may also be suffixes (with
# supybot.reply.whenAddressedBy.nick.atEnd); but there is no way to
# have it in the middle of the message AFAIK.
assert payload in text
if not text.endswith(payload):
# If there is a suffix, it means the end of the text is used to
# address the bot, so it can't be a method to be completed.
return
autocomplete_response = _getAutocompleteResponse(irc, msg, payload)
if not autocomplete_response:
return
target = msg.channel or ircutils.nickFromHostmask(msg.prefix)
irc.queueMsg(
ircmsgs.IrcMsg(
server_tags={
"+draft/reply": msgid,
RESPONSE_TAG: autocomplete_response,
},
command="TAGMSG",
args=[target],
)
)
Class = Autocomplete
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
enaml/qt/qt_timer.py | xtuzy/enaml | 1,080 | 11145 | <reponame>xtuzy/enaml
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.timer import ProxyTimer
from .QtCore import QTimer
from .qt_toolkit_object import QtToolkitObject
class QtTimer(QtToolkitObject, ProxyTimer):
""" A Qt implementation of an Enaml ProxyTimer.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QTimer)
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying timer object.
"""
self.widget = QTimer()
def init_widget(self):
""" Initialize the widget.
"""
super(QtTimer, self).init_widget()
d = self.declaration
self.set_interval(d.interval)
self.set_single_shot(d.single_shot)
self.widget.timeout.connect(self.on_timeout)
def destroy(self):
""" A reimplemented destructor.
This stops the timer before invoking the superclass destructor.
"""
self.widget.stop()
super(QtTimer, self).destroy()
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def on_timeout(self):
""" Handle the timeout signal for the timer.
"""
d = self.declaration
if d is not None:
d.timeout()
#--------------------------------------------------------------------------
# ProxyTimer API
#--------------------------------------------------------------------------
def set_interval(self, interval):
""" Set the interval on the timer.
"""
self.widget.setInterval(interval)
def set_single_shot(self, single_shot):
""" Set the single shot flag on the timer.
"""
self.widget.setSingleShot(single_shot)
def start(self):
""" Start or restart the timer.
"""
self.widget.start()
def stop(self):
""" Stop the timer.
"""
self.widget.stop()
def is_running(self):
""" Get whether or not the timer is running.
"""
return self.widget.isActive()
|
pypy/module/cpyext/test/test_pystrtod.py | m4sterchain/mesapy | 381 | 11149 | <filename>pypy/module/cpyext/test/test_pystrtod.py
import math
from pypy.module.cpyext import pystrtod
from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
from pypy.module.cpyext.pystrtod import PyOS_string_to_double
class TestPyOS_string_to_double(BaseApiTest):
def test_simple_float(self, space):
s = rffi.str2charp('0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert r == 0.4
rffi.free_charp(s)
def test_empty_string(self, space):
s = rffi.str2charp('')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_bad_string(self, space):
s = rffi.str2charp(' 0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_overflow_pos(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r > 0
rffi.free_charp(s)
def test_overflow_neg(self, space):
s = rffi.str2charp('-1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r < 0
rffi.free_charp(s)
def test_overflow_exc(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, space.w_ValueError)
rffi.free_charp(s)
def test_endptr_number(self, space):
s = rffi.str2charp('0.4')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_tail(self, space):
s = rffi.str2charp('0.4 foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_no_conversion(self, space):
s = rffi.str2charp('foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, endp, None)
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
class TestPyOS_double_to_string(BaseApiTest):
def test_format_code(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(150.0, 'e', 1, 0, ptype)
assert '1.5e+02' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_precision(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3.14159269397, 'g', 5, 0, ptype)
assert '3.1416' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_sign(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(-3.14, 'g', 3, 1, ptype)
assert '-3.14' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_add_dot_0(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3, 'g', 5, 2, ptype)
assert '3.0' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_alt(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(314., 'g', 3, 4, ptype)
assert '314.' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_nan(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(float('nan'), 'g', 3, 4, ptype)
assert 'nan' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_NAN == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_infinity(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(1e200 * 1e200, 'g', 0, 0, ptype)
assert 'inf' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_INFINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_null(self, api):
ptype = lltype.nullptr(rffi.INTP.TO)
r = api.PyOS_double_to_string(3.14, 'g', 3, 0, ptype)
assert '3.14' == rffi.charp2str(r)
assert ptype == lltype.nullptr(rffi.INTP.TO)
rffi.free_charp(r)
|
python/cuxfilter/tests/charts/core/test_core_non_aggregate.py | Anhmike/cuxfilter | 201 | 11163 | import pytest
import cudf
import mock
from cuxfilter.charts.core.non_aggregate.core_non_aggregate import (
BaseNonAggregate,
)
from cuxfilter.dashboard import DashBoard
from cuxfilter import DataFrame
from cuxfilter.layouts import chart_view
class TestCoreNonAggregateChart:
def test_variables(self):
bnac = BaseNonAggregate()
# BaseChart variables
assert bnac.chart_type is None
assert bnac.x is None
assert bnac.y is None
assert bnac.aggregate_fn == "count"
assert bnac.color is None
assert bnac.height == 0
assert bnac.width == 0
assert bnac.add_interaction is True
assert bnac.chart is None
assert bnac.source is None
assert bnac.source_backup is None
assert bnac.data_points == 0
assert bnac._library_specific_params == {}
assert bnac.stride is None
assert bnac.stride_type == int
assert bnac.min_value == 0.0
assert bnac.max_value == 0.0
assert bnac.x_label_map == {}
assert bnac.y_label_map == {}
assert bnac.title == ""
# test chart name setter
bnac.x = "x"
bnac.y = "y"
bnac.chart_type = "test_chart_type"
assert bnac.name == "x_y_count_test_chart_type_"
# BaseNonAggregateChart variables
assert bnac.use_data_tiles is False
assert bnac.reset_event is None
assert bnac.x_range is None
assert bnac.y_range is None
assert bnac.aggregate_col is None
def test_label_mappers(self):
bnac = BaseNonAggregate()
library_specific_params = {
"x_label_map": {"a": 1, "b": 2},
"y_label_map": {"a": 1, "b": 2},
}
bnac.library_specific_params = library_specific_params
assert bnac.x_label_map == {"a": 1, "b": 2}
assert bnac.y_label_map == {"a": 1, "b": 2}
@pytest.mark.parametrize("chart, _chart", [(None, None), (1, 1)])
def test_view(self, chart, _chart):
bnac = BaseNonAggregate()
bnac.chart = chart
bnac.width = 400
bnac.title = "test_title"
assert str(bnac.view()) == str(
chart_view(_chart, width=bnac.width, title=bnac.title)
)
def test_get_selection_geometry_callback(self):
bnac = BaseNonAggregate()
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
assert (
bnac.get_selection_geometry_callback(dashboard).__name__
== "selection_callback"
)
assert callable(type(bnac.get_selection_geometry_callback(dashboard)))
def test_box_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
class evt:
geometry = dict(x0=1, x1=2, y0=3, y1=4, type="rect")
t = bnac.get_selection_geometry_callback(dashboard)
t(evt)
assert self.result.equals(df.query("1<=a<=2 and 3<=b<=4"))
def test_lasso_election_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
class evt:
geometry = dict(x=[1, 1, 2], y=[1, 2, 1], type="poly")
final = True
t = bnac.get_selection_geometry_callback(dashboard)
with mock.patch("cuspatial.point_in_polygon") as pip:
pip.return_value = cudf.DataFrame(
{"selection": [True, False, True]}
)
t(evt)
assert pip.called
@pytest.mark.parametrize(
"data, _data",
[
(cudf.DataFrame(), cudf.DataFrame()),
(
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
),
],
)
def test_calculate_source(self, data, _data):
"""
Calculate source just calls the format_source_data function,
which is implemented by the chart types inheriting this class.
"""
bnac = BaseNonAggregate()
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.format_source_data = t_function
bnac.calculate_source(data)
assert self.result.equals(_data)
@pytest.mark.parametrize(
"x_range, y_range, query, local_dict",
[
(
(1, 2),
(3, 4),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 1, "x_max": 2, "y_min": 3, "y_max": 4},
),
(
(0, 2),
(3, 5),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 0, "x_max": 2, "y_min": 3, "y_max": 5},
),
],
)
def test_compute_query_dict(self, x_range, y_range, query, local_dict):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "x"
bnac.y = "y"
bnac.x_range = x_range
bnac.y_range = y_range
df = cudf.DataFrame({"x": [1, 2, 2], "y": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
bnac.compute_query_dict(
dashboard._query_str_dict, dashboard._query_local_variables_dict
)
bnac_key = (
f"{bnac.x}_{bnac.y}"
f"{'_' + bnac.aggregate_col if bnac.aggregate_col else ''}"
f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}"
)
assert dashboard._query_str_dict[bnac_key] == query
for key in local_dict:
assert (
dashboard._query_local_variables_dict[key] == local_dict[key]
)
@pytest.mark.parametrize(
"add_interaction, reset_event, event_1, event_2",
[
(True, None, "selection_callback", None),
(True, "test_event", "selection_callback", "reset_callback"),
(False, "test_event", None, "reset_callback"),
],
)
def test_add_events(self, add_interaction, reset_event, event_1, event_2):
bnac = BaseNonAggregate()
bnac.add_interaction = add_interaction
bnac.reset_event = reset_event
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
self.event_1 = None
self.event_2 = None
def t_func(fn):
self.event_1 = fn.__name__
def t_func1(event, fn):
self.event_2 = fn.__name__
bnac.add_selection_geometry_event = t_func
bnac.add_event = t_func1
bnac.add_events(dashboard)
assert self.event_1 == event_1
assert self.event_2 == event_2
def test_add_reset_event(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac.x_range = (0, 2)
bnac.y_range = (3, 5)
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
def t_func1(event, fn):
fn("event")
bnac.add_event = t_func1
bnac.add_reset_event(dashboard)
assert bnac.x_range is None
assert bnac.y_range is None
def test_query_chart_by_range(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
query_tuple = (4, 5)
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as it's not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_range(
active_chart=bnac_1, query_tuple=query_tuple, datatile=None
)
assert self.result.to_string() == " a b\n1 2 4\n2 3 5"
assert self.patch_update is False
@pytest.mark.parametrize(
"new_indices, result",
[
([4, 5], " a b\n1 2 4\n2 3 5"),
([], " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"),
([3], " a b\n0 1 3"),
],
)
def test_query_chart_by_indices(self, new_indices, result):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
new_indices = new_indices
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as it's not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_indices(
active_chart=bnac_1,
old_indices=[],
new_indices=new_indices,
datatile=None,
)
assert self.result.to_string() == result
assert self.patch_update is False
|
recogym/envs/session.py | philomenec/reco-gym | 413 | 11165 | <reponame>philomenec/reco-gym
class Session(list):
"""Abstract Session class"""
def to_strings(self, user_id, session_id):
"""represent session as list of strings (one per event)"""
user_id, session_id = str(user_id), str(session_id)
session_type = self.get_type()
strings = []
for event, product in self:
columns = [user_id, session_type, session_id, event, str(product)]
strings.append(','.join(columns))
return strings
def get_type(self):
raise NotImplementedError
class OrganicSessions(Session):
def __init__(self):
super(OrganicSessions, self).__init__()
def next(self, context, product):
self.append(
{
't': context.time(),
'u': context.user(),
'z': 'pageview',
'v': product
}
)
def get_type(self):
return 'organic'
def get_views(self):
return [p for _, _, e, p in self if e == 'pageview']
|
examples/plots/warmup_schedule.py | shuoyangd/pytorch_warmup | 170 | 11184 | import argparse
import matplotlib.pyplot as plt
import torch
from pytorch_warmup import *
def get_rates(warmup_cls, beta2, max_step):
rates = []
p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32))
optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
warmup_scheduler = warmup_cls(optimizer)
for step in range(1, max_step+1):
rates.append(optimizer.param_groups[0]['lr'])
optimizer.zero_grad()
optimizer.step()
lr_scheduler.step()
warmup_scheduler.dampen()
return rates
parser = argparse.ArgumentParser(description='Warmup schedule')
parser.add_argument('--output', type=str, default='none',
choices=['none', 'png', 'pdf'],
help='Output file type (default: none)')
args = parser.parse_args()
beta2 = 0.999
max_step = 3000
plt.plot(range(1, max_step+1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam')
plt.plot(range(1, max_step+1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential')
plt.plot(range(1, max_step+1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear')
plt.legend()
plt.title('Warmup Schedule')
plt.xlabel('Iteration')
plt.ylabel(r'Warmup factor $(\omega_t)$')
if args.output == 'none':
plt.show()
else:
plt.savefig(f'warmup_schedule.{args.output}')
|
utils/data_processing.py | LisaAnne/LocalizingMoments | 157 | 11206 | import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging
if glove_path == 'data/glove_debug_path.txt':
print "continue?"
pdb.set_trace()
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
possible_segments.append(i)
length_prep_word = 40
length_prep_character = 250
vocab_file = 'data/vocab_glove_complete.txt'
def word_tokenize(s):
sent = s.lower()
sent = re.sub('[^A-Za-z0-9\s]+',' ', sent)
return sent.split()
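# Example (comment added for clarity): word_tokenize("It's 5 o'clock!")
# returns ['it', 's', '5', 'o', 'clock'] -- punctuation becomes whitespace
# before splitting.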
def sentences_to_words(sentences):
words = []
for s in sentences:
words.extend(word_tokenize(str(s.lower())))
return words
class glove_embedding(object):
''' Creates glove embedding object
'''
def __init__(self, glove_file=glove_path):
glove_txt = open(glove_file).readlines()
glove_txt = [g.strip() for g in glove_txt]
glove_vector = [g.split(' ') for g in glove_txt]
glove_words = [g[0] for g in glove_vector]
glove_vecs = [g[1:] for g in glove_vector]
glove_array = np.zeros((glove_dim, len(glove_words)))
glove_dict = {}
for i, w in enumerate(glove_words): glove_dict[w] = i
for i, vec in enumerate(glove_vecs):
glove_array[:,i] = np.array(vec)
self.glove_array = glove_array
self.glove_dict = glove_dict
self.glove_words = glove_words
class zero_language_vector(object):
def __init__(self, data):
self.dim = glove_dim
def get_vector_dim(self):
return self.dim
def get_vocab_size(self):
return 0
def preprocess(self, data):
embedding = np.zeros((self.get_vector_dim(),))
for d in data:
d['language_input'] = embedding
d['gt'] = (d['gt'][0], d['gt'][1])
return data
class recurrent_language(object):
def get_vocab_size(self):
return len(self.vocab_dict.keys())
def preprocess_sentence(self, words):
vector_dim = self.get_vector_dim()
sentence_mat = np.zeros((len(words), vector_dim))
count_words = 0
for i, w in enumerate(words):
try:
sentence_mat[count_words,:] = self.vocab_dict[w]
count_words += 1
except:
if '<unk>' in self.vocab_dict.keys():
sentence_mat[count_words,:] = self.vocab_dict['<unk>']
count_words += 1
else:
pass
sentence_mat = sentence_mat[:count_words]
return sentence_mat
def preprocess(self, data):
for d in data:
words = sentences_to_words([d['description']])
d['language_input'] = self.preprocess_sentence(words)
return data
class recurrent_word(recurrent_language):
def __init__(self, data):
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' not in vocab:
vocab.append('<unk>')
vocab_dict = {}
for i, word in enumerate(vocab):
vocab_dict[word] = i
self.vocab_dict = vocab_dict
def get_vector_dim(self):
return 1
class recurrent_embedding(recurrent_language):
def read_embedding(self):
print "Reading glove embedding"
embedding = glove_embedding(glove_path)
self.embedding = embedding
def get_vector_dim(self):
return glove_dim
def __init__(self, data):
self.read_embedding()
embedding = self.embedding
vector_dim = self.get_vector_dim()
self.data = data
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' in vocab:
vocab.remove('<unk>') #don't have an <unk> vector. Alternatively, could map to random vector...
vocab_dict = {}
for i, word in enumerate(vocab):
try:
vocab_dict[word] = embedding.glove_array[:,embedding.glove_dict[word]]
except:
print "%s not in glove embedding" %word
self.vocab_dict = vocab_dict
def preprocess(self, data):
vector_dim = self.get_vector_dim()
for d in data:
d['language_input'] = sentences_to_words([d['description']])
return data
def get_vocab_dict(self):
return self.vocab_dict
#Methods for extracting visual features
def feature_process_base(start, end, features):
return np.mean(features[start:end+1,:], axis = 0)
def feature_process_norm(start, end, features):
base_feature = np.mean(features[start:end+1,:], axis = 0)
return base_feature/(np.linalg.norm(base_feature) + 0.00001)
def feature_process_context(start, end, features):
feature_dim = features.shape[1]
full_feature = np.zeros((feature_dim*2,))
if np.sum(features[5,:]) > 0:
full_feature[:feature_dim] = feature_process_norm(0,6, features)
else:
full_feature[:feature_dim] = feature_process_norm(0,5, features)
full_feature[feature_dim:feature_dim*2] = feature_process_norm(start, end, features)
return full_feature
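# Note (comment added for clarity): the returned vector has twice the clip
# feature dimension -- the first half is a normalized mean over the whole
# video and the second half is a normalized mean over the queried
# [start, end] segment.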
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
class extractData(object):
""" General class to extract data.
"""
def increment(self):
#uses iteration, batch_size, data_list, and num_data to extract next batch identifiers
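# Walk-through (comment added for clarity): with num_data=10, batch_size=4 and
# iteration=8, the batch takes the last 2 items of data_list plus the first 2
# items of the reshuffled list, and iteration wraps to 10 - 8 = 2.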
next_batch = [None]*self.batch_size
if self.iteration + self.batch_size >= self.num_data:
next_batch[:self.num_data-self.iteration] = self.data_list[self.iteration:]
next_batch[self.num_data-self.iteration:] = self.data_list[:self.batch_size -(self.num_data-self.iteration)]
random.shuffle(self.data_list)
self.iteration = self.num_data - self.iteration
else:
next_batch = self.data_list[self.iteration:self.iteration+self.batch_size]
self.iteration += self.batch_size
assert self.iteration > -1
assert len(next_batch) == self.batch_size
return next_batch
class extractLanguageFeatures(extractData):
def __init__(self, dataset, params, result=None):
self.data_list = range(len(dataset))
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.vocab_dict = params['vocab_dict']
self.batch_size = params['batch_size']
self.num_glove_centroids = self.vocab_dict.values()[0].shape[0]
self.T = params['sentence_length']
if isinstance(result, dict):
self.result = result
self.query_key = params['query_key']
self.cont_key = params['cont_key']
self.top_keys = [self.query_key, self.cont_key]
self.top_shapes = [(self.T, self.batch_size, self.num_glove_centroids),
(self.T, self.batch_size)]
else:
print "Will only be able to run in test mode"
def get_features(self, query):
feature = np.zeros((self.T, self.num_glove_centroids))
cont = np.zeros((self.T,))
len_query = min(len(query), self.T)
if len_query < len(query):
query = query[:len_query]
for count_word, word in enumerate(query):
try:
feature[-(len_query)+count_word,:] = self.vocab_dict[word]
except:
feature[-(len_query)+count_word,:] = np.zeros((glove_dim,))
cont[-(len_query-1):] = 1
assert np.sum(feature[:-len_query,:]) == 0
return feature, cont
def get_data_test(self, data):
query = data['language_input']
return self.get_features(query)
def get_data(self, next_batch):
data = self.dataset
query_mat = np.zeros((self.T, self.batch_size, self.num_glove_centroids))
cont = np.zeros((self.T, self.batch_size))
for i, nb in enumerate(next_batch):
query = data[nb]['language_input']
query_mat[:,i,:], cont[:,i] = self.get_features(query)
self.result[self.query_key] = query_mat
self.result[self.cont_key] = cont
class extractVisualFeatures(extractData):
def __init__(self, dataset, params, result):
self.data_list = range(len(dataset))
self.feature_process_algo = params['feature_process']
self.loc_feature = params['loc_feature']
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.loc = params['loc_feature']
loss_type = params['loss_type']
assert loss_type in ['triplet', 'inter', 'intra']
self.inter = False
self.intra = False
if loss_type in ['triplet', 'inter']:
self.inter = True
if loss_type in ['triplet', 'intra']:
self.intra = True
self.batch_size = params['batch_size']
self.num_glove_centroids = params['num_glove_centroids']
features_h5py = h5py.File(params['features'])
features = {}
for key in features_h5py.keys():
features[key] = np.array(features_h5py[key])
features_h5py.close()
self.features = features
assert self.feature_process_algo in feature_process_dict.keys()
self.feature_process = feature_process_dict[self.feature_process_algo]
self.feature_dim = self.feature_process(0,0,self.features[self.dataset[0]['video']]).shape[-1]
self.result = result
self.feature_key_p = params['feature_key_p']
self.feature_time_stamp_p = params['feature_time_stamp_p']
self.feature_time_stamp_n = params['feature_time_stamp_n']
self.top_keys = [self.feature_key_p, self.feature_time_stamp_p, self.feature_time_stamp_n]
self.top_shapes = [(self.batch_size, self.feature_dim),
(self.batch_size, 2),
(self.batch_size,2)]
if self.inter:
self.feature_key_inter = 'features_inter'
self.top_keys.append(self.feature_key_inter)
self.top_shapes.append((self.batch_size, self.feature_dim))
if self.intra:
self.feature_key_intra = 'features_intra'
self.top_keys.append(self.feature_key_intra)
self.top_shapes.append((self.batch_size, self.feature_dim))
self.possible_annotations = possible_segments
def get_data_test(self, d):
video_feats = self.features[d['video']]
features = np.zeros((len(self.possible_annotations), self.feature_dim))
loc_feats = np.zeros((len(self.possible_annotations), 2))
for i, p in enumerate(self.possible_annotations):
features[i,:] = self.feature_process(p[0], p[1], video_feats)
loc_feats[i,:] = [p[0]/6., p[1]/6.]
return features, loc_feats
def get_data(self, next_batch):
feature_process = self.feature_process
data = self.dataset
features_p = np.zeros((self.batch_size, self.feature_dim))
if self.inter: features_inter = np.zeros((self.batch_size, self.feature_dim))
if self.intra: features_intra = np.zeros((self.batch_size, self.feature_dim))
features_time_stamp_p = np.zeros((self.batch_size, 2))
features_time_stamp_n = np.zeros((self.batch_size, 2))
for i, nb in enumerate(next_batch):
rint = random.randint(0,len(data[nb]['times'])-1)
gt_s = data[nb]['times'][rint][0]
gt_e = data[nb]['times'][rint][1]
possible_n = list(set(self.possible_annotations) - set(((gt_s,gt_e),)))
random.shuffle(possible_n)
n = possible_n[0]
assert n != (gt_s, gt_e)
video = data[nb]['video']
feats = self.features[video]
if self.inter:
other_video = data[nb]['video']
while (other_video == video):
other_video_index = int(random.random()*len(data))
other_video = data[other_video_index]['video']
feats_inter = self.features[other_video]
features_p[i,:] = feature_process(gt_s, gt_e, feats)
if self.intra:
features_intra[i,:] = feature_process(n[0], n[1], feats)
if self.inter:
try:
features_inter[i,:] = feature_process(gt_s, gt_e, feats_inter)
except:
pdb.set_trace()
if self.loc:
features_time_stamp_p[i,0] = gt_s/6.
features_time_stamp_p[i,1] = gt_e/6.
features_time_stamp_n[i,0] = n[0]/6.
features_time_stamp_n[i,1] = n[1]/6.
else:
features_time_stamp_p[i,0] = 0
features_time_stamp_p[i,1] = 0
features_time_stamp_n[i,0] = 0
features_time_stamp_n[i,1] = 0
assert not math.isnan(np.mean(self.features[data[nb]['video']][n[0]:n[1]+1,:]))
assert not math.isnan(np.mean(self.features[data[nb]['video']][gt_s:gt_e+1,:]))
self.result[self.feature_key_p] = features_p
self.result[self.feature_time_stamp_p] = features_time_stamp_p
self.result[self.feature_time_stamp_n] = features_time_stamp_n
if self.inter:
self.result[self.feature_key_inter] = features_inter
if self.intra:
self.result[self.feature_key_intra] = features_intra
class batchAdvancer(object):
def __init__(self, extractors):
self.extractors = extractors
self.increment_extractor = extractors[0]
def __call__(self):
#The batch advancer just calls each extractor
next_batch = self.increment_extractor.increment()
for e in self.extractors:
e.get_data(next_batch)
class python_data_layer(caffe.Layer):
""" General class to extract data.
"""
def setup(self, bottom, top):
random.seed(10)
self.params = eval(self.param_str)
params = self.params
assert 'top_names' in params.keys()
#set up prefetching
self.thread_result = {}
self.thread = None
self.setup_extractors()
self.batch_advancer = batchAdvancer(self.data_extractors)
shape_dict = {}
self.top_names = []
for de in self.data_extractors:
for top_name, top_shape in zip(de.top_keys, de.top_shapes):
shape_dict[top_name] = top_shape
self.top_names.append((params['top_names'].index(top_name), top_name))
self.dispatch_worker()
self.top_shapes = [shape_dict[tn[1]] for tn in self.top_names]
print 'Outputs:', self.top_names
if len(top) != len(self.top_names):
raise Exception('Incorrect number of outputs (expected %d, got %d)' %
(len(self.top_names), len(top)))
self.join_worker()
#for top_index, name in enumerate(self.top_names.keys()):
top_count = 0
for top_index, name in self.top_names:
shape = self.top_shapes[top_count]
print 'Top name %s has shape %s.' %(name, shape)
top[top_index].reshape(*shape)
top_count += 1
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.thread is not None:
self.join_worker()
for top_index, name in self.top_names:
top[top_index].data[...] = self.thread_result[name]
self.dispatch_worker()
def dispatch_worker(self):
assert self.thread is None
self.thread = Thread(target=self.batch_advancer)
self.thread.start()
def join_worker(self):
assert self.thread is not None
self.thread.join()
self.thread = None
def backward(self, top, propagate_down, bottom):
pass
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
language_feature_process_dict = {'zero_language': zero_language_vector,
'recurrent_embedding': recurrent_embedding}
class dataLayer_ExtractPairedLanguageVision(python_data_layer):
def setup_extractors(self):
assert 'top_names' in self.params.keys()
assert 'descriptions' in self.params.keys()
assert 'features' in self.params.keys()
if 'batch_size' not in self.params.keys(): self.params['batch_size'] = 120
self.params['query_key'] = 'query'
self.params['feature_key_n'] = 'features_n'
self.params['feature_key_p'] = 'features_p'
self.params['feature_key_t'] = 'features_t'
self.params['feature_time_stamp_p'] = 'features_time_stamp_p'
self.params['feature_time_stamp_n'] = 'features_time_stamp_n'
self.params['cont_key'] = 'cont'
language_extractor_fcn = extractLanguageFeatures
visual_extractor_fcn = extractVisualFeatures
language_process = recurrent_embedding
data_orig = read_json(self.params['descriptions'])
random.shuffle(data_orig)
language_processor = language_process(data_orig)
data = language_processor.preprocess(data_orig)
self.params['vocab_dict'] = language_processor.vocab_dict
num_glove_centroids = language_processor.get_vector_dim()
self.params['num_glove_centroids'] = num_glove_centroids
visual_feature_extractor = visual_extractor_fcn(data, self.params, self.thread_result)
textual_feature_extractor = language_extractor_fcn(data, self.params, self.thread_result)
self.data_extractors = [visual_feature_extractor, textual_feature_extractor]
|
hwilib/devices/keepkey.py | cjackie/HWI | 285 | 11222 | <filename>hwilib/devices/keepkey.py
"""
Keepkey
*******
"""
from ..errors import (
DEVICE_NOT_INITIALIZED,
DeviceNotReadyError,
common_err_msgs,
handle_errors,
)
from .trezorlib import protobuf as p
from .trezorlib.transport import (
hid,
udp,
webusb,
)
from .trezor import TrezorClient, HID_IDS, WEBUSB_IDS
from .trezorlib.messages import (
DebugLinkState,
Features,
HDNodeType,
ResetDevice,
)
from typing import (
Any,
Dict,
List,
Optional,
)
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that
KEEPKEY_HID_IDS = {(0x2B24, 0x0001)}
KEEPKEY_WEBUSB_IDS = {(0x2B24, 0x0002)}
KEEPKEY_SIMULATOR_PATH = '127.0.0.1:11044'
HID_IDS.update(KEEPKEY_HID_IDS)
WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)
class KeepkeyFeatures(Features): # type: ignore
def __init__(
self,
*,
firmware_variant: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.firmware_variant = firmware_variant
self.firmware_hash = firmware_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('vendor', p.UnicodeType, None),
2: ('major_version', p.UVarintType, None),
3: ('minor_version', p.UVarintType, None),
4: ('patch_version', p.UVarintType, None),
5: ('bootloader_mode', p.BoolType, None),
6: ('device_id', p.UnicodeType, None),
7: ('pin_protection', p.BoolType, None),
8: ('passphrase_protection', p.BoolType, None),
9: ('language', p.UnicodeType, None),
10: ('label', p.UnicodeType, None),
12: ('initialized', p.BoolType, None),
13: ('revision', p.BytesType, None),
14: ('bootloader_hash', p.BytesType, None),
15: ('imported', p.BoolType, None),
16: ('unlocked', p.BoolType, None),
21: ('model', p.UnicodeType, None),
22: ('firmware_variant', p.UnicodeType, None),
23: ('firmware_hash', p.BytesType, None),
24: ('no_backup', p.BoolType, None),
25: ('wipe_code_protection', p.BoolType, None),
}
class KeepkeyResetDevice(ResetDevice): # type: ignore
def __init__(
self,
*,
auto_lock_delay_ms: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.auto_lock_delay_ms = auto_lock_delay_ms
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('display_random', p.BoolType, None),
2: ('strength', p.UVarintType, 256), # default=256
3: ('passphrase_protection', p.BoolType, None),
4: ('pin_protection', p.BoolType, None),
5: ('language', p.UnicodeType, "en-US"), # default=en-US
6: ('label', p.UnicodeType, None),
7: ('no_backup', p.BoolType, None),
8: ('auto_lock_delay_ms', p.UVarintType, None),
9: ('u2f_counter', p.UVarintType, None),
}
class KeepkeyDebugLinkState(DebugLinkState): # type: ignore
def __init__(
self,
*,
recovery_cipher: Optional[str] = None,
recovery_auto_completed_word: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
storage_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.recovery_cipher = recovery_cipher
self.recovery_auto_completed_word = recovery_auto_completed_word
self.firmware_hash = firmware_hash
self.storage_hash = storage_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldType]:
return {
1: ('layout', p.BytesType, None),
2: ('pin', p.UnicodeType, None),
3: ('matrix', p.UnicodeType, None),
4: ('mnemonic_secret', p.BytesType, None),
5: ('node', HDNodeType, None),
6: ('passphrase_protection', p.BoolType, None),
7: ('reset_word', p.UnicodeType, None),
8: ('reset_entropy', p.BytesType, None),
9: ('recovery_fake_word', p.UnicodeType, None),
10: ('recovery_word_pos', p.UVarintType, None),
11: ('recovery_cipher', p.UnicodeType, None),
12: ('recovery_auto_completed_word', p.UnicodeType, None),
13: ('firmware_hash', p.BytesType, None),
14: ('storage_hash', p.BytesType, None),
}
class KeepkeyClient(TrezorClient):
def __init__(self, path: str, password: str = "", expert: bool = False) -> None:
"""
The `KeepkeyClient` is a `HardwareWalletClient` for interacting with the Keepkey.
As Keepkeys are clones of the Trezor 1, please refer to `TrezorClient` for documentation.
"""
super(KeepkeyClient, self).__init__(path, password, expert, KEEPKEY_HID_IDS, KEEPKEY_WEBUSB_IDS, KEEPKEY_SIMULATOR_PATH)
self.type = 'Keepkey'
self.client.vendors = ("keepkey.com")
self.client.minimum_versions = {"K1-14AM": (0, 0, 0)}
self.client.map_type_to_class_override[KeepkeyFeatures.MESSAGE_WIRE_TYPE] = KeepkeyFeatures
self.client.map_type_to_class_override[KeepkeyResetDevice.MESSAGE_WIRE_TYPE] = KeepkeyResetDevice
if self.simulator:
self.client.debug.map_type_to_class_override[KeepkeyDebugLinkState.MESSAGE_WIRE_TYPE] = KeepkeyDebugLinkState
def enumerate(password: str = "") -> List[Dict[str, Any]]:
results = []
devs = hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)
devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))
devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))
for dev in devs:
d_data: Dict[str, Any] = {}
d_data['type'] = 'keepkey'
d_data['model'] = 'keepkey'
d_data['path'] = dev.get_path()
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = KeepkeyClient(d_data['path'], password)
try:
client.client.refresh_features()
except TypeError:
continue
if 'keepkey' not in client.client.features.vendor:
continue
d_data['label'] = client.client.features.label
if d_data['path'].startswith('udp:'):
d_data['model'] += '_simulator'
d_data['needs_pin_sent'] = client.client.features.pin_protection and not client.client.features.unlocked
d_data['needs_passphrase_sent'] = client.client.features.passphrase_protection # always need the passphrase sent for Keepkey if it has passphrase protection enabled
if d_data['needs_pin_sent']:
raise DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')
if d_data['needs_passphrase_sent'] and not password:
raise DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")
if client.client.features.initialized:
d_data['fingerprint'] = client.get_master_fingerprint().hex()
d_data['needs_passphrase_sent'] = False # Passphrase is always needed for the above to have worked, so it's already sent
else:
d_data['error'] = 'Not initialized'
d_data['code'] = DEVICE_NOT_INITIALIZED
if client:
client.close()
results.append(d_data)
return results
|
tests/test_lamost_tools.py | igomezv/astroNN | 156 | 11240 | import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | 207 | 11278 | <reponame>Mannan2812/azure-cli-extensions
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
)
|
tt/satisfiability/picosat.py | fkromer/tt | 233 | 11327 | <reponame>fkromer/tt
"""Python wrapper around the _clibs PicoSAT extension."""
import os
from tt.errors.arguments import (
InvalidArgumentTypeError,
InvalidArgumentValueError)
if os.environ.get('READTHEDOCS') != 'True':
from tt._clibs import picosat as _c_picosat
VERSION = _c_picosat.VERSION
def sat_one(clauses, assumptions=None):
"""Find a solution that satisfies the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: If solution is found, a list of ints representing the terms of
the solution; otherwise, if no solution found, ``None``.
:rtype: List[:class:`int <python:int>`] or ``None``
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Let's look at a simple example with no satisfiable solution::
>>> from tt import picosat
>>> picosat.sat_one([[1], [-1]]) is None
True
Here's an example where a solution exists::
>>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]])
[1, -2, -3]
Finally, here's an example using assumptions::
>>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3])
[-1, 2, -3]
"""
try:
return _c_picosat.sat_one(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
def sat_all(clauses, assumptions=None):
"""Find all solutions that satisfy the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: An iterator of solutions; if no satisfiable solutions exist, the
iterator will be empty.
:rtype: Iterator[List[:class:`int <python:int>`]]
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Here's an example showing the basic usage::
>>> from tt import picosat
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]):
... print(solution)
...
[1, 2, 3, 4]
[1, 2, 3, -4]
[1, 2, -3, 4]
[1, 2, -3, -4]
[1, -2, 3, 4]
[1, -2, 3, -4]
We can cut down on some of the above solutions by including an assumption::
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]],
... assumptions=[-3]):
... print(solution)
...
[1, 2, -3, 4]
[1, 2, -3, -4]
"""
try:
return _c_picosat.sat_all(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
|
src/prefect/schedules/adjustments.py | concreted/prefect | 8,633 | 11330 | """
Schedule adjustments are functions that accept a `datetime` and modify it in some way.
Adjustments have the signature `Callable[[datetime], datetime]`.
"""
from datetime import datetime, timedelta
from typing import Callable
import pendulum
import prefect.schedules.filters
def add(interval: timedelta) -> Callable[[datetime], datetime]:
"""
Adjustment that adds a specified interval to the date.
Args:
- interval (timedelta): the amount of time to add
Returns:
        - Callable[[datetime], datetime]: the adjustment function
"""
def _adjustment_fn(dt: datetime) -> datetime:
return pendulum.instance(dt) + interval
return _adjustment_fn
def next_weekday(dt: datetime) -> datetime:
"""
Adjustment that advances a date to the next weekday. If the date is already a weekday,
it is returned unadjusted.
Args:
- dt (datetime): the datetime to adjust
Returns:
- datetime: the adjusted datetime
"""
pdt = pendulum.instance(dt)
while not prefect.schedules.filters.is_weekday(pdt):
pdt = pdt.add(days=1)
return pdt
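# Illustrative usage sketch (not part of the original module): shows how an adjustment
# built by ``add`` and the ``next_weekday`` adjustment are called. Running it needs
# prefect (for the weekday filter) and pendulum installed.
if __name__ == "__main__":
    one_hour_later = add(timedelta(hours=1))
    start = datetime(2021, 1, 1, 12, 0)        # a Friday
    print(one_hour_later(start))               # one hour later, as a pendulum DateTime
    print(next_weekday(datetime(2021, 1, 2)))  # a Saturday, advanced to the following Monday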
|
src/pynwb/retinotopy.py | weiglszonja/pynwb | 132 | 11332 | <reponame>weiglszonja/pynwb
from collections.abc import Iterable
import warnings
from hdmf.utils import docval, popargs, call_docval_func, get_docval
from . import register_class, CORE_NAMESPACE
from .core import NWBDataInterface, NWBData
class RetinotopyImage(NWBData):
"""Gray-scale anatomical image of cortical surface. Array structure: [rows][columns]
"""
__nwbfields__ = ('bits_per_pixel',
'dimension',
'format',
'field_of_view')
@docval({'name': 'name', 'type': str, 'doc': 'Name of this retinotopy image'},
{'name': 'data', 'type': Iterable, 'doc': 'Data field.'},
{'name': 'bits_per_pixel', 'type': int,
'doc': 'Number of bits used to represent each value. This is necessary to determine maximum '
'(white) pixel value.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ), 'doc': 'Number of rows and columns in the image.'},
{'name': 'format', 'type': Iterable, 'doc': 'Format of image. Right now only "raw" supported.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'})
def __init__(self, **kwargs):
bits_per_pixel, dimension, format, field_of_view = popargs(
'bits_per_pixel', 'dimension', 'format', 'field_of_view', kwargs)
call_docval_func(super().__init__, kwargs)
self.bits_per_pixel = bits_per_pixel
self.dimension = dimension
self.format = format
self.field_of_view = field_of_view
class FocalDepthImage(RetinotopyImage):
"""Gray-scale image taken with same settings/parameters (e.g., focal depth,
wavelength) as data collection. Array format: [rows][columns].
"""
__nwbfields__ = ('focal_depth', )
@docval(*get_docval(RetinotopyImage.__init__),
{'name': 'focal_depth', 'type': 'float', 'doc': 'Focal depth offset, in meters.'})
def __init__(self, **kwargs):
focal_depth = popargs('focal_depth', kwargs)
call_docval_func(super().__init__, kwargs)
self.focal_depth = focal_depth
class RetinotopyMap(NWBData):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude)
"""
__nwbfields__ = ('field_of_view',
'dimension')
@docval({'name': 'name', 'type': str, 'doc': 'the name of this axis map'},
{'name': 'data', 'type': Iterable, 'shape': (None, None), 'doc': 'data field.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ),
'doc': 'Number of rows and columns in the image'})
def __init__(self, **kwargs):
field_of_view, dimension = popargs('field_of_view', 'dimension', kwargs)
call_docval_func(super().__init__, kwargs)
self.field_of_view = field_of_view
self.dimension = dimension
class AxisMap(RetinotopyMap):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude) with unit
"""
__nwbfields__ = ('unit', )
@docval(*get_docval(RetinotopyMap.__init__, 'name', 'data', 'field_of_view'),
{'name': 'unit', 'type': str, 'doc': 'Unit that axis data is stored in (e.g., degrees)'},
*get_docval(RetinotopyMap.__init__, 'dimension'))
def __init__(self, **kwargs):
unit = popargs('unit', kwargs)
call_docval_func(super().__init__, kwargs)
self.unit = unit
@register_class('ImagingRetinotopy', CORE_NAMESPACE)
class ImagingRetinotopy(NWBDataInterface):
"""
Intrinsic signal optical imaging or widefield imaging for measuring retinotopy. Stores orthogonal
maps (e.g., altitude/azimuth; radius/theta) of responses to specific stimuli and a combined
polarity map from which to identify visual areas.
This group does not store the raw responses imaged during retinotopic mapping or the
stimuli presented, but rather the resulting phase and power maps after applying a Fourier
transform on the averaged responses.
Note: for data consistency, all images and arrays are stored in the format [row][column] and
[row, col], which equates to [y][x]. Field of view and dimension arrays may appear backward
(i.e., y before x).
"""
__nwbfields__ = ({'name': 'sign_map', 'child': True},
{'name': 'axis_1_phase_map', 'child': True},
{'name': 'axis_1_power_map', 'child': True},
{'name': 'axis_2_phase_map', 'child': True},
{'name': 'axis_2_power_map', 'child': True},
{'name': 'focal_depth_image', 'child': True},
{'name': 'vasculature_image', 'child': True},
'axis_descriptions')
@docval({'name': 'sign_map', 'type': RetinotopyMap,
'doc': 'Sine of the angle between the direction of the gradient in axis_1 and axis_2.'},
{'name': 'axis_1_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the first measured axis.'},
{'name': 'axis_1_power_map', 'type': AxisMap,
'doc': 'Power response on the first measured axis. Response is scaled so 0.0 is no power in '
'the response and 1.0 is maximum relative power.'},
{'name': 'axis_2_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the second measured axis.'},
{'name': 'axis_2_power_map', 'type': AxisMap,
'doc': 'Power response on the second measured axis. Response is scaled so 0.0 is no '
'power in the response and 1.0 is maximum relative power.'},
{'name': 'axis_descriptions', 'type': Iterable, 'shape': (2, ),
'doc': 'Two-element array describing the contents of the two response axis fields. '
'Description should be something like ["altitude", "azimuth"] or ["radius", "theta"].'},
{'name': 'focal_depth_image', 'type': FocalDepthImage,
'doc': 'Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) '
'as data collection. Array format: [rows][columns].'},
{'name': 'vasculature_image', 'type': RetinotopyImage,
'doc': 'Gray-scale anatomical image of cortical surface. Array structure: [rows][columns].'},
{'name': 'name', 'type': str, 'doc': 'the name of this container', 'default': 'ImagingRetinotopy'})
def __init__(self, **kwargs):
axis_1_phase_map, axis_1_power_map, axis_2_phase_map, axis_2_power_map, axis_descriptions, \
focal_depth_image, sign_map, vasculature_image = popargs(
'axis_1_phase_map', 'axis_1_power_map', 'axis_2_phase_map', 'axis_2_power_map',
'axis_descriptions', 'focal_depth_image', 'sign_map', 'vasculature_image', kwargs)
call_docval_func(super().__init__, kwargs)
warnings.warn("The ImagingRetinotopy class currently cannot be written to or read from a file. "
"This is a known bug and will be fixed in a future release of PyNWB.")
self.axis_1_phase_map = axis_1_phase_map
self.axis_1_power_map = axis_1_power_map
self.axis_2_phase_map = axis_2_phase_map
self.axis_2_power_map = axis_2_power_map
self.axis_descriptions = axis_descriptions
self.focal_depth_image = focal_depth_image
self.sign_map = sign_map
self.vasculature_image = vasculature_image
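# Illustrative sketch (not part of PyNWB): roughly how the map objects above are built,
# based only on the docval signatures in this file; the names and values are dummies.
#     import numpy as np
#     phase = AxisMap(name='axis_1_phase_map', data=np.zeros((4, 5)),
#                     field_of_view=[0.1, 0.1], unit='degrees', dimension=[4, 5])
#     vasculature = RetinotopyImage(name='vasculature_image',
#                                   data=np.zeros((4, 5), dtype=np.uint16),
#                                   bits_per_pixel=16, dimension=[4, 5], format='raw',
#                                   field_of_view=[0.1, 0.1])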
|
nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | 136 | 11340 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype
from nncf.common.graph.operator_metatypes import UnknownMetatype
DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = {
QuantizationTrait.INPUTS_QUANTIZABLE: [
ONNXConvolutionMetatype,
ONNXLinearMetatype,
ONNXAveragePoolMetatype,
ONNXGlobalAveragePoolMetatype,
ONNXAddLayerMetatype,
ONNXMulLayerMetatype,
ONNXBatchNormMetatype,
ONNXHardSigmoidMetatype,
ONNXResizeMetatype,
],
QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype,
ONNXSoftmaxMetatype,
UnknownMetatype],
QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype],
QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: []
}
|
RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | 852 | 11393 | import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal efficiency
# WPs tuned to give about 90% and 80% signal efficiency for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
|
test/python/test_elementwise_ops.py | avijit-chakroborty/ngraph-bridge | 142 | 11405 | <reponame>avijit-chakroborty/ngraph-bridge
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestElementwiseOperations(NgraphTest):
@pytest.mark.parametrize(("v1", "v2", "expected"),
((1.0, -1.0, [1.0]), (100, 200, ([200],)),
([0.0, 5.0, 10.0], [6.0],
(np.array([[6.0, 6.0, 10.0]]),))))
def test_maximum(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.maximum(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [True]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, False, True]]),))))
def test_less_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [True]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, False, True]]),))))
def test_less(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [False]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, True, False]]),))))
def test_greater_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [False]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, True, False]]),))))
def test_greater(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("v1", "v2", "expected"),
((True, True, [True]), (True, False, ([False],)),
(1.0, -2.0, ([True],)), (False, 100, ([False],)),
([False, True, False], [True],
(np.array([[False, True, False]]),))))
def test_logical_and(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.bool, shape=(None))
val2 = tf.compat.v1.placeholder(tf.bool, shape=(None))
out = tf.logical_and(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("test_input", "expected"), ((False, True),
(True, False)))
def test_logicalnot_1d(self, test_input, expected):
val = tf.compat.v1.placeholder(tf.bool, shape=(1,))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: (test_input,)})[
0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
def test_logicalnot_2d(self):
test_input = ((True, False, True), (False, True, False))
expected = np.logical_not(test_input)
val = tf.compat.v1.placeholder(tf.bool, shape=(2, 3))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: test_input})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
|
lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 2,611 | 11408 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generator for image data."""
import os
import lingvo.compat as tf
from lingvo.core import base_input_generator
from tensorflow.python.ops import io_ops
class _MnistInputBase(base_input_generator.BaseTinyDatasetInput):
"""Base input params for MNIST."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data_dtype = tf.uint8
p.data_shape = (28, 28, 1)
p.label_dtype = tf.uint8
return p
def _Preprocess(self, raw):
data = tf.stack([
tf.image.per_image_standardization(img) for img in tf.unstack(raw)
])
data.set_shape(raw.shape)
return data
class MnistTrainInput(_MnistInputBase):
"""MNist training set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_train'
p.label = 'y_train'
p.num_samples = 60000
p.batch_size = 256
p.repeat = True
return p
class MnistTestInput(_MnistInputBase):
"""MNist test set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_test'
p.label = 'y_test'
p.num_samples = 10000
p.batch_size = 256
p.repeat = False
return p
def _GetRandomImages(batch_size):
images = tf.random.uniform((batch_size, 28, 28, 1), 0, 255, tf.int32)
return tf.cast(images, tf.uint8)
def _GetRandomLabels(batch_size):
labels = tf.random.categorical(0.1 * tf.ones((1, 10)), batch_size)
return tf.cast(labels, tf.uint8)
def FakeMnistData(tmpdir, train_size=60000, test_size=10000):
"""Fake Mnist data for unit tests."""
data_path = os.path.join(tmpdir, 'ckpt')
with tf.Graph().as_default():
tf.random.set_seed(91)
with tf.Session() as sess:
sess.run(
io_ops.save_v2(
data_path,
tensor_names=['x_train', 'y_train', 'x_test', 'y_test'],
shape_and_slices=['', '', '', ''],
tensors=[
_GetRandomImages(train_size),
_GetRandomLabels(train_size),
_GetRandomImages(test_size),
_GetRandomLabels(test_size)
]))
return data_path
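# Illustrative sketch (not part of Lingvo): FakeMnistData is meant to be called from unit
# tests; the returned path is then assigned to the dataset-path param of the input
# generator (defined in BaseTinyDatasetInput, not shown in this file), e.g.:
#     data_path = FakeMnistData(self.get_temp_dir())
#     p = MnistTrainInput.Params()
#     p.ckpt = data_path  # assumed param name; check BaseTinyDatasetInput.Params()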
|
leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | mamane19/coding-interview-gym | 713 | 11438 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
from collections import defaultdict
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
queue = deque([(root, 0)])
verticalNodeMap = defaultdict(list)
while queue:
node, horrizotalDistace = queue.popleft()
if node:
verticalNodeMap[horrizotalDistace].append(node.val)
queue.append((node.left, horrizotalDistace - 1))
queue.append((node.right, horrizotalDistace + 1))
minHorrizotalDistace, maxHorrizotalDistace = min(verticalNodeMap.keys()), max(verticalNodeMap.keys())
result = []
for key in range(minHorrizotalDistace, maxHorrizotalDistace + 1):
result.append(verticalNodeMap[key])
return result
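# Small illustrative harness (not part of the original submission): builds the tree
# 3 / (9, 20 / (15, 7)) and prints its vertical order, expected [[9], [3, 15], [20], [7]].
if __name__ == "__main__":
    class TreeNode(object):  # minimal stand-in for LeetCode's TreeNode definition above
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
    demo_root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().verticalOrder(demo_root))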
# My solution during mock; it was getting TLE because the BFS kept appending `root`
# instead of `currentNode`, so the queue never drained
from collections import defaultdict
from collections import deque
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
orderMap = defaultdict(list)
queue = deque([(root, 0)])
while queue:
currentNode, vLine = queue.popleft()
if currentNode:
                orderMap[vLine].append(currentNode.val)
                queue.append((currentNode.left, vLine - 1))
                queue.append((currentNode.right, vLine + 1))
result = []
for i in range(min(orderMap.keys()), max(orderMap.keys()) + 1):
result.append(orderMap[i])
return result |
tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 726 | 11453 | """Test API utilities."""
import json
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.gateway import Gateway
def test_constructor_timeout_passed_to_subprocess(monkeypatch):
"""Test that original timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", timeout=20, psk="abc")
api.request(Gateway().get_devices())
assert capture["timeout"] == 20
def test_custom_timeout_passed_to_subprocess(monkeypatch):
"""Test that custom timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", psk="abc")
api.request(Gateway().get_devices(), timeout=1)
assert capture["timeout"] == 1
|
acronym/scoring.py | sigma67/acronym | 340 | 11469 | import re
regex = re.compile('[^a-zA-Z]')
def score_word(word, corpus=None):
word = regex.sub('', word) # leave only alpha
score = 0
consec_bonus = 2
for i, letter in enumerate(word):
if letter.islower():
continue
        if i > 0 and word[i-1].isupper():
score += consec_bonus
if i == 0:
score += 10
elif (i == 1) or (i == len(word)-1):
score += 3
else:
score += 1
if (i >= 1) and (corpus is not None) and (word[i:].lower() in corpus):
score += len(word[i:])-1
return score
def score_acronym(capitalized_acronym, corpus=None):
"""
For each capitalized letter in the acronym:
    * 10 points if first letter in a word (with the exception of the very first letter)
    * 3 points if second or last letter in a word
* 1 point otherwise
* N bonus points if begins an N-length valid sub-word
(ex: multiVariable -> 8 bonus points)
    * 2 bonus points if immediately following a capitalized letter
"""
return sum([score_word(word, corpus=corpus) for word in capitalized_acronym.split(' ')]) - 10
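# Illustrative usage sketch (not part of the original module): scores a few candidate
# expansions; ``corpus`` is assumed to be a collection of lowercase words.
if __name__ == "__main__":
    demo_corpus = {"variable", "network", "graphics"}
    for candidate in ("Portable Network Graphics", "multiVariable testing", "plain words"):
        print(candidate, "->", score_acronym(candidate, corpus=demo_corpus))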
|
wagtail/admin/views/pages/unpublish.py | brownaa/wagtail | 8,851 | 11501 | <reponame>brownaa/wagtail<gh_stars>1000+
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _
from wagtail.admin import messages
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.core import hooks
from wagtail.core.models import Page, UserPagePermissionsProxy
def unpublish(request, page_id):
page = get_object_or_404(Page, id=page_id).specific
user_perms = UserPagePermissionsProxy(request.user)
if not user_perms.for_page(page).can_unpublish():
raise PermissionDenied
next_url = get_valid_next_url_from_request(request)
if request.method == 'POST':
include_descendants = request.POST.get("include_descendants", False)
for fn in hooks.get_hooks('before_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
page.unpublish(user=request.user)
if include_descendants:
for live_descendant_page in page.get_descendants().live().defer_streamfields().specific():
if user_perms.for_page(live_descendant_page).can_unpublish():
live_descendant_page.unpublish()
for fn in hooks.get_hooks('after_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
if next_url:
return redirect(next_url)
return redirect('wagtailadmin_explore', page.get_parent().id)
return TemplateResponse(request, 'wagtailadmin/pages/confirm_unpublish.html', {
'page': page,
'next': next_url,
'live_descendant_count': page.get_descendants().live().count(),
})
|
cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 171 | 11503 | #!/usr/bin/env python
import pymysql #Python3
db = pymysql.connect("localhost","sips","root","zaijian" )  # positional args: host, user, password, database
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print ("Database version : %s " % data)
db.close()
def create_table():
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
sql = """CREATE TABLE EMPLOYEE (
FIRST_NAME CHAR(20) NOT NULL,
LAST_NAME CHAR(20),
AGE INT,
SEX CHAR(1),
INCOME FLOAT )"""
cursor.execute(sql)
db.close()
def db_insert():
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
LAST_NAME, AGE, SEX, INCOME)
VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close()
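# Illustrative sketch (not part of the original file): the helpers above are defined but
# never called; run as a script they could be exercised like this (needs a reachable
# MySQL server accepting the credentials used above).
if __name__ == "__main__":
    create_table()
    db_insert()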
|
data.py | zhaoyun630/R-NET-in-Keras | 207 | 11506 | <filename>data.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import cPickle as pickle
from keras import backend as K
from keras.utils import np_utils
from keras.preprocessing import sequence
from random import shuffle
import itertools
def load_dataset(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def padded_batch_input(input, indices=None, dtype=K.floatx(), maxlen=None):
if indices is None:
indices = np.arange(len(input))
batch_input = [input[i] for i in indices]
return sequence.pad_sequences(batch_input, maxlen, dtype, padding='post')
def categorical_batch_target(target, classes, indices=None, dtype=K.floatx()):
if indices is None:
indices = np.arange(len(target))
batch_target = [min(target[i], classes-1) for i in indices]
return np_utils.to_categorical(batch_target, classes).astype(dtype)
def lengthGroup(length):
if length < 150:
return 0
if length < 240:
return 1
if length < 380:
return 2
if length < 520:
return 3
if length < 660:
return 4
return 5
class BatchGen(object):
def __init__(self, inputs, targets=None, batch_size=None, stop=False,
shuffle=True, balance=False, dtype=K.floatx(),
flatten_targets=False, sort_by_length=False,
group=False, maxlen=None):
assert len(set([len(i) for i in inputs])) == 1
assert(not shuffle or not sort_by_length)
self.inputs = inputs
self.nb_samples = len(inputs[0])
self.batch_size = batch_size if batch_size else self.nb_samples
self.dtype = dtype
self.stop = stop
self.shuffle = shuffle
self.balance = balance
self.targets = targets
self.flatten_targets = flatten_targets
if isinstance(maxlen, (list, tuple)):
self.maxlen = maxlen
else:
self.maxlen = [maxlen] * len(inputs)
self.sort_by_length = None
if sort_by_length:
self.sort_by_length = np.argsort([-len(p) for p in inputs[0]])
# if self.targets and self.balance:
# self.class_weight = class_weight(self.targets)
self.generator = self._generator()
self._steps = -(-self.nb_samples // self.batch_size) # round up
self.groups = None
if group is not False:
indices = np.arange(self.nb_samples)
ff = lambda i: lengthGroup(len(inputs[0][i]))
indices = np.argsort([ff(i) for i in indices])
self.groups = itertools.groupby(indices, ff)
self.groups = {k: np.array(list(v)) for k, v in self.groups}
def _generator(self):
while True:
if self.shuffle:
permutation = np.random.permutation(self.nb_samples)
elif self.sort_by_length is not None:
permutation = self.sort_by_length
elif self.groups is not None:
# permutation = np.arange(self.nb_samples)
# tmp = permutation.copy()
# for id in self.group_ids:
# mask = (self.groups==id)
# tmp[mask] = np.random.permutation(permutation[mask])
# permutation = tmp
# import ipdb
# ipdb.set_trace()
for k, v in self.groups.items():
np.random.shuffle(v)
tmp = np.concatenate(self.groups.values())
batches = np.array_split(tmp, self._steps)
remainder = []
if len(batches[-1]) < self._steps:
remainder = batches[-1:]
batches = batches[:-1]
shuffle(batches)
batches += remainder
permutation = np.concatenate(batches)
else:
permutation = np.arange(self.nb_samples)
i = 0
longest = 767
while i < self.nb_samples:
if self.sort_by_length is not None:
bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0]
else:
bs = self.batch_size
indices = permutation[i : i + bs]
i = i + bs
# for i in range(0, self.nb_samples, self.batch_size):
# indices = permutation[i : i + self.batch_size]
batch_X = [padded_batch_input(x, indices, self.dtype, maxlen)
for x, maxlen in zip(self.inputs, self.maxlen)]
P = batch_X[0].shape[1]
if not self.targets:
yield batch_X
continue
batch_Y = [categorical_batch_target(target, P,
indices, self.dtype)
for target in self.targets]
if self.flatten_targets:
batch_Y = np.concatenate(batch_Y, axis=-1)
if not self.balance:
yield (batch_X, batch_Y)
continue
# batch_W = np.array([self.class_weight[y] for y in batch_targets])
batch_W = np.array([bs / self.batch_size for x in batch_X[0]]).astype(self.dtype)
yield (batch_X, batch_Y, batch_W)
if self.stop:
raise StopIteration
def __iter__(self):
return self.generator
def next(self):
return self.generator.next()
def __next__(self):
return self.generator.__next__()
def steps(self):
if self.sort_by_length is None:
return self._steps
print("Steps was called")
if self.shuffle:
permutation = np.random.permutation(self.nb_samples)
elif self.sort_by_length is not None:
permutation = self.sort_by_length
else:
permutation = np.arange(self.nb_samples)
i = 0
longest = 767
self._steps = 0
while i < self.nb_samples:
bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0]
i = i + bs
self._steps += 1
return self._steps
batch_gen = BatchGen # for backward compatibility
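# Illustrative sketch (not from the original repo): roughly how BatchGen is driven with
# Keras' fit_generator; `contexts`, `questions`, `answer_starts` and `answer_ends` are
# placeholder names for lists of index sequences / target positions.
#     train_gen = BatchGen([contexts, questions],
#                          targets=[answer_starts, answer_ends],
#                          batch_size=32, shuffle=True)
#     model.fit_generator(train_gen, steps_per_epoch=train_gen.steps(), epochs=10)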
|
doit/exceptions.py | m4ta1l/doit | 1,390 | 11519 | <reponame>m4ta1l/doit
"""Handle exceptions generated from 'user' code"""
import sys
import traceback
class InvalidCommand(Exception):
"""Invalid command line argument."""
def __init__(self, *args, **kwargs):
self.not_found = kwargs.pop('not_found', None)
super(InvalidCommand, self).__init__(*args, **kwargs)
self.cmd_used = None
        self.bin_name = 'doit' # default but might be overwritten
def __str__(self):
if self.not_found is None:
return super(InvalidCommand, self).__str__()
if self.cmd_used:
msg_task_not_found = (
'command `{cmd_used}` invalid parameter: "{not_found}".' +
' Must be a task, or a target.\n' +
'Type "{bin_name} list" to see available tasks')
return msg_task_not_found.format(**self.__dict__)
else:
msg_cmd_task_not_found = (
'Invalid parameter: "{not_found}".' +
' Must be a command, task, or a target.\n' +
'Type "{bin_name} help" to see available commands.\n' +
'Type "{bin_name} list" to see available tasks.\n')
return msg_cmd_task_not_found.format(**self.__dict__)
class InvalidDodoFile(Exception):
"""Invalid dodo file"""
pass
class InvalidTask(Exception):
"""Invalid task instance. User error on specifying the task."""
pass
class CatchedException(object):
"""This used to save info from caught exceptions
The traceback from the original exception is saved
"""
def __init__(self, msg, exception=None):
self.message = msg
self.traceback = ''
if isinstance(exception, CatchedException):
self.traceback = exception.traceback
elif exception is not None:
# TODO remove doit-code part from traceback
self.traceback = traceback.format_exception(
exception.__class__, exception, sys.exc_info()[2])
def get_msg(self):
"""return full exception description (includes traceback)"""
return "%s\n%s" % (self.message, "".join(self.traceback))
def get_name(self):
"""get Exception name"""
return self.__class__.__name__
def __repr__(self):
return "(<%s> %s)" % (self.get_name(), self.message)
def __str__(self):
return "%s\n%s" % (self.get_name(), self.get_msg())
class TaskFailed(CatchedException):
"""Task execution was not successful."""
pass
class UnmetDependency(TaskFailed):
"""Task was not executed because a dependent task failed or is ignored"""
pass
class TaskError(CatchedException):
"""Error while trying to execute task."""
pass
class SetupError(CatchedException):
"""Error while trying to execute setup object"""
pass
class DependencyError(CatchedException):
"""Error while trying to check if task is up-to-date or saving task status"""
pass
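# Illustrative sketch (not part of doit): wrapping a real exception in a CatchedException
# subclass keeps both the message and the original traceback for later reporting.
if __name__ == '__main__':
    try:
        1 / 0
    except Exception as exc:
        error = TaskError("task 'demo' raised an exception", exc)
        print(error.get_name())  # -> TaskError
        print(error.get_msg())   # the message followed by the captured traceback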
|
owtf/__main__.py | Udbhavbisarya23/owtf | 1,514 | 11572 | <gh_stars>1000+
"""
owtf.__main__
~~~~~~~~~~~~~
A __main__ method for OWTF so that internal services can be called as Python modules.
"""
import sys
from owtf.core import main
if __name__ == "__main__":
main()
|
recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py | tdiprima/code | 2,023 | 11576 | <reponame>tdiprima/code<gh_stars>1000+
#
# prime number generator
# This program gets two number as input
# and prints
# Prime numbers in the range
# Actual number of primes in the range
# and Estimation based on formula
# n
# pi(n)= -------
# log(n)
# pi(n)=number of primes less than n
#
from math import *
def isPrime(n):
    if n<2:return False                     #0 and 1 are NOT prime
    if n%2==0 and n!=2:return False #if number is EVEN AND it is NOT 2
k = n**0.5 ; m = ceil(k) #if number is PERFECT SQUARE
if k==m:return False
for i in xrange(3,int(m),2): #divisibility test ODDS ONLY
if n%i==0:return False
return True #otherwise it is PRIME
if __name__=='__main__':
s = input('Enter Start: ')
e = input('Enter End: ')
s|=1 #if s%2==0:s+=1 # ODDS only
list = [x for x in range(s,e,2) if isPrime(x)]
print list,'\n',len(list),'\n',int(ceil(e/log(e)-s/log(s)))
#prints list of primes , length of list , estimate using the formula
|
src/apiron/service/discoverable.py | tushar-deepsource/apiron | 109 | 11587 | from typing import List, Type
from apiron.service.base import ServiceBase
class DiscoverableService(ServiceBase):
"""
A Service whose hosts are determined via a host resolver.
A host resolver is any class with a :func:`resolve` method
that takes a service name as its sole argument
and returns a list of host names that correspond to that service.
"""
host_resolver_class: Type
service_name: str
@classmethod
def get_hosts(cls) -> List[str]:
return cls.host_resolver_class.resolve(cls.service_name)
def __str__(self) -> str:
return self.service_name
def __repr__(self) -> str:
klass = self.__class__
return "{klass}(service_name={service_name}, host_resolver={host_resolver})".format(
klass=klass.__name__, service_name=klass.service_name, host_resolver=klass.host_resolver_class.__name__
)
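# Illustrative sketch (not part of apiron): a host resolver only needs a ``resolve``
# method mapping a service name to a list of hosts, so a hard-coded resolver is enough
# to exercise ``get_hosts``; the class and host names below are made up.
if __name__ == "__main__":
    class StaticResolver:
        @staticmethod
        def resolve(service_name: str) -> List[str]:
            return ["svc-a.internal.example", "svc-b.internal.example"]
    class DemoService(DiscoverableService):
        service_name = "demo-service"
        host_resolver_class = StaticResolver
    print(DemoService.get_hosts())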
|
vespene/workers/registration.py | Conan-Kudo/vespene | 680 | 11590 | # Copyright 2018, <NAME> LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# registration.py - updates the database to say who is building something
# and what the current settings are, which is used by the file serving
# code to see if it is ok to serve up files in the buildroot. But also
# for record keeping.
# --------------------------------------------------------------------------
from datetime import datetime
import random
import fcntl
import subprocess
import os
from django.utils import timezone
from django.conf import settings
from vespene.common.logger import Logger
from vespene.models.worker import Worker
LOG = Logger()
WORKER_ID_FILE = "/etc/vespene/worker_id"
# =============================================================================
class RegistrationManager(object):
def __init__(self, builder, build):
self.builder = builder
self.build = build
self.project = self.build.project
def create_worker_id(self):
wid = ''.join(random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50))
fd = open(WORKER_ID_FILE, "w+")
fd.write(wid)
fd.close()
return wid
def get_worker_id(self, fd):
return fd.readlines()[0].strip()
def get_worker_record(self, worker_id):
qs = Worker.objects.filter(worker_uid=worker_id)
if not qs.exists():
return None
return qs.first()
# worker_pool = models.ForeignKey('WorkerPool', null=False, on_delete=models.SET_NULL)
# hostname = models.CharField(max_length=1024, null=True)
# port = models.IntField(null=False, default=8080)
# working_dir = models.CharField(max_length=1024, null=True)
# first_checkin = models.DateTimeField(null=True, blank=True)
# last_checkin = models.DateTimeField(null=True, blank=True)
# fileserving_enabled = models.BooleanField(null=False, default=False)
def get_hostname(self):
if settings.FILESERVING_HOSTNAME:
return settings.FILESERVING_HOSTNAME
return self.guess_hostname()
def guess_hostname(self):
return subprocess.check_output("hostname").decode('utf-8').strip()
def get_port(self):
if settings.FILESERVING_PORT:
return settings.FILESERVING_PORT
else:
return 8000
def get_build_root(self):
return settings.BUILD_ROOT
def get_fileserving_enabled(self):
return settings.FILESERVING_ENABLED
def create_worker_record(self, worker_id):
now = datetime.now(tz=timezone.utc)
obj = Worker(
worker_uid = worker_id,
hostname = self.get_hostname(),
port = self.get_port(),
build_root = self.get_build_root(),
first_checkin = now,
last_checkin = now,
fileserving_enabled = self.get_fileserving_enabled()
)
obj.save()
return obj
def update_worker_record(self, worker):
now = datetime.now(tz=timezone.utc)
worker.hostname = self.get_hostname()
worker.port = self.get_port()
worker.build_root = self.get_build_root()
worker.last_checkin = now
worker.fileserving_enabled = self.get_fileserving_enabled()
worker.save()
return worker
def go(self):
"""
        Register this worker: create or refresh its database record and attach it to the current build
"""
if not os.path.exists(WORKER_ID_FILE):
worker_id = self.create_worker_id()
fd = open(WORKER_ID_FILE, "r")
fcntl.flock(fd, fcntl.LOCK_EX)
worker_id = self.get_worker_id(fd)
worker_record = self.get_worker_record(worker_id)
if not worker_record:
worker_record = self.create_worker_record(worker_id)
else:
worker_record = self.update_worker_record(worker_record)
self.build.worker = worker_record
self.build.save()
fcntl.flock(fd, fcntl.LOCK_UN)
|
timeglass.py | mountwebs/timeglass | 110 | 11606 | <filename>timeglass.py
import rumps
import sys
import icon_manager
from datetime import timedelta
import timekeeper
import os
# pyinstaller --onefile -w --add-data "Icons/:Icons" --icon="Icons/timeglass.png" --clean timeglass.spec
# rumps.debug_mode(True)
class TimerApp(rumps.App):
def __init__(self, initial_seconds):
super(TimerApp, self).__init__("")
self.mode = "hourglass"
self.timekeeper = timekeeper.Timer(initial_seconds)
self.template = True
self.im = icon_manager.Icon_manager(initial_seconds)
self.change_icon()
self.remaining_sec = rumps.MenuItem(self.timekeeper.get_remaining_string())
self.menu = [self.remaining_sec]
self.next_icon_change = self.im.icon_interval
self.rumps_timer = rumps.Timer(self.tick,0.5)
self.rumps_timer.callback(self.tick)
self.invert_counter = 0
        self.notDone = False
        self.notified = False
        self.sound = True
def change_icon(self):
print("frame:", self.im.icon_counter)
self.icon = self.im.get_icon_path()
def change_remaining(self):
self.remaining_sec.title = self.timekeeper.get_remaining_string()
def tick(self, _):
if self.timekeeper.tick():
self.notDone = True
self.invert_counter = 0
self.change_remaining()
if self.timekeeper.elapsed >= self.next_icon_change:
self.im.icon_counter = int(self.timekeeper.elapsed/self.im.icon_interval) + 1 #1-89
self.change_icon()
self.next_icon_change += self.im.icon_interval
if self.timekeeper.done:
self.im.active = False
self.change_icon()
if not self.notified:
self.notify()
self.notified = True
if self.notDone:
self.icon = self.im.invert()
self.invert_counter += 1
if self.invert_counter > 5:
self.notDone = False
self.rumps_timer.stop()
self.reset()
def notify(self):
title = "Time is up!"
text = ""
sound = "Glass"
try:
if self.sound:
os.system("""osascript -e 'display notification "{}" with title "{}" sound name "{}"'""".format(text, title, sound))
else:
os.system("""osascript -e 'display notification "{}" with title "{}"'""".format(text, title, sound))
except:
print("Could not send notification")
@rumps.clicked("Start", key="s")
def pause(self, sender):
if sender.title == "Pause":
self.timekeeper.pause_timer()
self.rumps_timer.stop()
sender.title = "Start"
elif sender.title == "Start":
self.timekeeper.start()
self.im.active = True
self.change_icon()
self.rumps_timer.start()
sender.title = "Pause"
@rumps.clicked("Reset", key="r")
def reset_button(self, sender):
self.reset()
self.menu["Start"].title = "Start"
def reset(self):
self.timekeeper.reset()
self.rumps_timer.stop()
self.im.active = False
self.im.reset()
self.change_icon()
self.change_remaining()
self.next_icon_change = self.im.icon_interval
self.menu["Start"].title = "Start"
self.notified = False
def string_to_sec(self, text):
nums = text.split(":")
nums.reverse()
seconds = 0
for i,n in enumerate(nums):
if i == 0:
seconds += int(n)
else:
seconds += (60**i) * int(n)
                print((60**i) * int(n))
return seconds
def validate_input(self, text):
texts = text.split(":")
if len(texts)>3: return False
for s in texts:
try:
int(s)
except:
return False
return True
@rumps.clicked("Set time", key="t")
def set_time(self, _):
self.timekeeper.pause_timer()
response = rumps.Window("Enter time: (hours:minutes:seconds)").run()
if response.clicked:
if not self.validate_input(response.text):
skip = True
rumps.alert("Does not compute! Please try again.")
else:
seconds = self.string_to_sec(response.text)
print(seconds)
skip = False
if not skip:
self.rumps_timer.stop()
self.timekeeper.set_time(seconds)
self.im.set_icon_interval(seconds)
self.im.reset()
self.im.active = False
self.next_icon_change = self.im.icon_interval
self.change_icon()
self.change_remaining()
self.menu["Start"].title = "Start"
if __name__ == "__main__":
    default_seconds = 60 * 60
    TimerApp(default_seconds).run()
|
3rdParty/boost/1.71.0/libs/python/test/iterator.py | rajeev02101987/arangodb | 12,278 | 11617 | # Copyright <NAME> 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from __future__ import print_function
'''
>>> from iterator_ext import *
>>> from input_iterator import *
>>> x = list_int()
>>> x.push_back(1)
>>> x.back()
1
>>> x.push_back(3)
>>> x.push_back(5)
>>> for y in x:
... print(y)
1
3
5
>>> z = range(x)
>>> for y in z:
... print(y)
1
3
5
Range2 wraps a transform_iterator which doubles the elements it
traverses. This proves we can wrap input iterators
>>> z2 = range2(x)
>>> for y in z2:
... print(y)
2
6
10
>>> l2 = two_lists()
>>> for y in l2.primes:
... print(y)
2
3
5
7
11
13
>>> for y in l2.evens:
... print(y)
2
4
6
8
10
12
>>> ll = list_list()
>>> ll.push_back(x)
>>> x.push_back(7)
>>> ll.push_back(x)
>>> for a in ll: #doctest: +NORMALIZE_WHITESPACE
... for b in a:
... print(b, end='')
... print('')
...
1 3 5
1 3 5 7
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if (status == 0): print("Done.")
sys.exit(status)
|
tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py | ryanloney/openvino-1 | 1,127 | 11623 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.ops.proposal import ProposalOp
from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs
from openvino.tools.mo.front.extractor import FrontExtractorOp
class ProposalFrontExtractor(FrontExtractorOp):
op = 'Proposal'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.proposal_param
update_attrs = {
'feat_stride': param.feat_stride,
'base_size': param.base_size,
'min_size': param.min_size,
'ratio': mo_array(param.ratio),
'scale': mo_array(param.scale),
'pre_nms_topn': param.pre_nms_topn,
'post_nms_topn': param.post_nms_topn,
'nms_thresh': param.nms_thresh
}
mapping_rule = merge_attrs(param, update_attrs)
# update the attributes of the node
ProposalOp.update_node_stat(node, mapping_rule)
return cls.enabled
|
nonebot/command/argfilter/controllers.py | EVAyo/nonebot | 676 | 11629 | """
Provide several commonly used controllers.
These controllers usually take some parameters and are called once; what they return is the actual validator. The trick is that a closure lets the inner function access the object that needs to be controlled.
Version: 1.3.0+
"""
import re
from nonebot import CommandSession
from nonebot.helpers import render_expression
def handle_cancellation(session: CommandSession):
"""
    When the user sends something like `算了`, `不用了`, `取消吧` or `停` (i.e. a cancellation phrase), finish the current command session (by calling `session.finish()`) and send the content configured in `SESSION_CANCEL_EXPRESSION`.
    If the input is not such a cancellation phrase, return it unchanged.
    Args:
        session: the command session to control
"""
def control(value):
if _is_cancellation(value) is True:
session.finish(
render_expression(session.bot.config.SESSION_CANCEL_EXPRESSION))
return value
return control
def _is_cancellation(sentence: str) -> bool:
for kw in ('算', '别', '不', '停', '取消'):
if kw in sentence:
# a keyword matches
break
else:
# no keyword matches
return False
if re.match(r'^那?[算别不停]\w{0,3}了?吧?$', sentence) or \
re.match(r'^那?(?:[给帮]我)?取消了?吧?$', sentence):
return True
return False
__all__ = [
'handle_cancellation',
]
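# Illustrative sketch (not part of this module): the intended wiring per the docstring
# above -- pass the controller into ``arg_filters`` when prompting for an argument, so a
# cancellation phrase aborts the session. The command and argument names are made up and
# the exact ``session.get`` signature should be checked against the NoneBot 1.x docs.
#     from nonebot import on_command
#     from nonebot.command.argfilter import controllers
#     @on_command('weather')
#     async def weather(session: CommandSession):
#         city = session.get('city', prompt='Which city?',
#                            arg_filters=[controllers.handle_cancellation(session)])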
|
desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py | UAEKondaya1/expressvpn_leak_testing | 219 | 11638 | <filename>desktop_local_tests/windows/test_windows_public_ip_disrupt_reorder_adapters.py
from desktop_local_tests.public_ip_during_disruption import PublicIPDuringDisruptionTestCase
from desktop_local_tests.windows.windows_reorder_adapters_disrupter import WindowsReorderAdaptersDisrupter
class TestWindowsPublicIPDisruptReorderAdapters(PublicIPDuringDisruptionTestCase):
'''Summary:
Tests whether traffic leaving the user's device has the public IP hidden when the adapter order
is changed.
Details:
This test will connect to VPN then swap the priority of the primary and secondary network
adapters. The test then queries a webpage to detect it's public IP.
Discussion:
It's not 100% clear if, in the real world, adapters can change their order without user
involvement. It is still however a good stress test of the application.
On Windows adapter order is determined by the interface metric. It can be manually set but
otherwise it is determined by the system by deciding how "good" an adapter is, e.g. what is the
throughput. In theory that means metrics can change dynamically.
Weaknesses:
The time taken to perform each IP request is relatively long. Tests using IPResponder should be
preferred over these tests.
Scenarios:
Requires two active adapters.
TODO:
Consider a variant which changes the network "Location". This is much more likely to be
something a user might do.
'''
def __init__(self, devices, parameters):
super().__init__(WindowsReorderAdaptersDisrupter, devices, parameters)
|
rotkehlchen/api/server.py | rotkehlchenio/rotkehlchen | 137 | 11639 | import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import werkzeug
from flask import Blueprint, Flask, Response, abort, jsonify
from flask.views import MethodView
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket import Resource as WebsocketResource, WebSocketServer
from marshmallow import Schema
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import NotFound
from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result
from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser
from rotkehlchen.api.v1.resources import (
AaveBalancesResource,
AaveHistoryResource,
AccountingReportDataResource,
AccountingReportsResource,
AdexBalancesResource,
AdexHistoryResource,
AllAssetsResource,
AllBalancesResource,
AssetIconsResource,
AssetMovementsResource,
AssetsReplaceResource,
AssetsTypesResource,
AssetUpdatesResource,
AssociatedLocations,
AsyncTasksResource,
AvalancheTransactionsResource,
BalancerBalancesResource,
BalancerEventsHistoryResource,
BalancerTradesHistoryResource,
BinanceAvailableMarkets,
BinanceUserMarkets,
BlockchainBalancesResource,
BlockchainsAccountsResource,
BTCXpubResource,
CompoundBalancesResource,
CompoundHistoryResource,
CounterpartiesResource,
CurrentAssetsPriceResource,
DatabaseBackupsResource,
DatabaseInfoResource,
DataImportResource,
DBSnapshotDeletingResource,
DBSnapshotDownloadingResource,
DBSnapshotExportingResource,
DBSnapshotImportingResource,
DefiBalancesResource,
ERC20TokenInfo,
ERC20TokenInfoAVAX,
Eth2DailyStatsResource,
Eth2StakeDepositsResource,
Eth2StakeDetailsResource,
Eth2ValidatorsResource,
EthereumAirdropsResource,
EthereumAssetsResource,
EthereumModuleDataResource,
EthereumModuleResource,
EthereumTransactionsResource,
ExchangeBalancesResource,
ExchangeRatesResource,
ExchangesDataResource,
ExchangesResource,
ExternalServicesResource,
HistoricalAssetsPriceResource,
HistoryActionableItemsResource,
HistoryBaseEntryResource,
HistoryDownloadingResource,
HistoryExportingResource,
HistoryProcessingResource,
HistoryStatusResource,
IgnoredActionsResource,
IgnoredAssetsResource,
InfoResource,
LedgerActionsResource,
LiquityStakingHistoryResource,
LiquityStakingResource,
LiquityTrovesHistoryResource,
LiquityTrovesResource,
LoopringBalancesResource,
MakerdaoDSRBalanceResource,
MakerdaoDSRHistoryResource,
MakerdaoVaultDetailsResource,
MakerdaoVaultsResource,
ManuallyTrackedBalancesResource,
MessagesResource,
NamedEthereumModuleDataResource,
NamedOracleCacheResource,
NFTSBalanceResource,
NFTSResource,
OraclesResource,
OwnedAssetsResource,
PeriodicDataResource,
PickleDillResource,
PingResource,
QueriedAddressesResource,
ReverseEnsResource,
SettingsResource,
StakingResource,
StatisticsAssetBalanceResource,
StatisticsNetvalueResource,
StatisticsRendererResource,
StatisticsValueDistributionResource,
SushiswapBalancesResource,
SushiswapEventsHistoryResource,
SushiswapTradesHistoryResource,
TagsResource,
TradesResource,
UniswapBalancesResource,
UniswapEventsHistoryResource,
UniswapTradesHistoryResource,
UserAssetsResource,
UserPasswordChangeResource,
UserPremiumKeyResource,
UserPremiumSyncResource,
UsersByNameResource,
UsersResource,
WatchersResource,
YearnVaultsBalancesResource,
YearnVaultsHistoryResource,
YearnVaultsV2BalancesResource,
YearnVaultsV2HistoryResource,
create_blueprint,
)
from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp
from rotkehlchen.logging import RotkehlchenLogsAdapter
URLS = List[
Union[
Tuple[str, Type[MethodView]],
Tuple[str, Type[MethodView], str],
]
]
URLS_V1: URLS = [
('/users', UsersResource),
('/watchers', WatchersResource),
('/users/<string:name>', UsersByNameResource),
('/users/<string:name>/password', UserPasswordChangeResource),
('/premium', UserPremiumKeyResource),
('/premium/sync', UserPremiumSyncResource),
('/settings', SettingsResource),
('/tasks/', AsyncTasksResource),
('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'),
('/exchange_rates', ExchangeRatesResource),
('/external_services/', ExternalServicesResource),
('/oracles', OraclesResource),
('/oracles/<string:oracle>/cache', NamedOracleCacheResource),
('/exchanges', ExchangesResource),
('/exchanges/balances', ExchangeBalancesResource),
(
'/exchanges/balances/<string:location>',
ExchangeBalancesResource,
'named_exchanges_balances_resource',
),
('/assets/<string:asset>/icon', AssetIconsResource),
('/trades', TradesResource),
('/ledgeractions', LedgerActionsResource),
('/asset_movements', AssetMovementsResource),
('/tags', TagsResource),
('/exchanges/binance/pairs', BinanceAvailableMarkets),
('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets),
('/exchanges/data/', ExchangesDataResource),
('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'),
('/balances/blockchains', BlockchainBalancesResource),
(
'/balances/blockchains/<string:blockchain>',
BlockchainBalancesResource,
'named_blockchain_balances_resource',
),
('/balances/', AllBalancesResource),
('/balances/manual', ManuallyTrackedBalancesResource),
('/statistics/netvalue', StatisticsNetvalueResource),
('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource),
('/statistics/value_distribution', StatisticsValueDistributionResource),
('/statistics/renderer', StatisticsRendererResource),
('/messages/', MessagesResource),
('/periodic/', PeriodicDataResource),
('/history/', HistoryProcessingResource),
('/history/status', HistoryStatusResource),
('/history/export/', HistoryExportingResource),
('/history/download/', HistoryDownloadingResource),
('/history/events', HistoryBaseEntryResource),
('/history/actionable_items', HistoryActionableItemsResource),
('/reports/', AccountingReportsResource),
(
'/reports/<int:report_id>',
AccountingReportsResource,
'per_report_resource',
),
(
'/reports/<int:report_id>/data',
AccountingReportDataResource,
'per_report_data_resource',
),
('/queried_addresses', QueriedAddressesResource),
('/blockchains/ETH/transactions', EthereumTransactionsResource),
(
'/blockchains/ETH/transactions/<string:address>',
EthereumTransactionsResource,
'per_address_ethereum_transactions_resource',
),
('/blockchains/ETH2/validators', Eth2ValidatorsResource),
('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource),
('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource),
('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource),
('/blockchains/ETH/defi', DefiBalancesResource),
('/blockchains/ETH/airdrops', EthereumAirdropsResource),
('/blockchains/ETH/erc20details/', ERC20TokenInfo),
('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource),
('/blockchains/ETH/modules/data', EthereumModuleDataResource),
('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource),
('/blockchains/ETH/modules/', EthereumModuleResource),
('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource),
('/blockchains/ETH/modules/makerdao/dsrhistory', MakerdaoDSRHistoryResource),
('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
('/blockchains/BTC/xpub', BTCXpubResource),
('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
(
'/blockchains/AVAX/transactions/<string:address>',
AvalancheTransactionsResource,
'per_address_avalanche_transactions_resource',
),
('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
('/assets', OwnedAssetsResource),
('/assets/types', AssetsTypesResource),
('/assets/replace', AssetsReplaceResource),
('/assets/all', AllAssetsResource),
('/assets/ethereum', EthereumAssetsResource),
('/assets/prices/current', CurrentAssetsPriceResource),
('/assets/prices/historical', HistoricalAssetsPriceResource),
('/assets/ignored', IgnoredAssetsResource),
('/assets/updates', AssetUpdatesResource),
('/assets/user', UserAssetsResource),
('/actions/ignored', IgnoredActionsResource),
('/info', InfoResource),
('/ping', PingResource),
('/import', DataImportResource),
('/nfts', NFTSResource),
('/nfts/balances', NFTSBalanceResource),
('/database/info', DatabaseInfoResource),
('/database/backups', DatabaseBackupsResource),
('/locations/associated', AssociatedLocations),
('/staking/kraken', StakingResource),
('/snapshot/download', DBSnapshotDownloadingResource),
('/snapshot/export', DBSnapshotExportingResource),
('/snapshot/import', DBSnapshotImportingResource),
('/snapshot/delete', DBSnapshotDeletingResource),
('/ens/reverse', ReverseEnsResource),
]
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def setup_urls(
rest_api: RestAPI,
blueprint: Blueprint,
urls: URLS,
) -> None:
for url_tuple in urls:
if len(url_tuple) == 2:
route, resource_cls = url_tuple # type: ignore
endpoint = resource_cls.__name__.lower()
elif len(url_tuple) == 3:
route, resource_cls, endpoint = url_tuple # type: ignore
else:
raise ValueError(f"Invalid URL format: {url_tuple!r}")
blueprint.add_url_rule(
route,
view_func=resource_cls.as_view(endpoint, rest_api_object=rest_api),
)
def endpoint_not_found(e: NotFound) -> Response:
msg = 'invalid endpoint'
# The isinstance check is because I am not sure if `e` is always going to
# be a "NotFound" error here
if isinstance(e, NotFound):
msg = e.description
return api_response(wrap_in_fail_result(msg), HTTPStatus.NOT_FOUND)
@parser.error_handler # type: ignore
@resource_parser.error_handler
@ignore_kwarg_parser.error_handler
def handle_request_parsing_error(
err: ValidationError,
_request: werkzeug.local.LocalProxy,
_schema: Schema,
error_status_code: Optional[int], # pylint: disable=unused-argument
error_headers: Optional[Dict], # pylint: disable=unused-argument
) -> None:
""" This handles request parsing errors generated for example by schema
field validation failing."""
msg = str(err)
if isinstance(err.messages, dict):
# first key is just the location. Ignore
key = list(err.messages.keys())[0]
msg = json.dumps(err.messages[key])
elif isinstance(err.messages, list):
msg = ','.join(err.messages)
err_response = jsonify(result=None, message=msg)
err_response.status_code = HTTPStatus.BAD_REQUEST
abort(err_response)
class APIServer():
_api_prefix = '/api/1'
def __init__(
self,
rest_api: RestAPI,
ws_notifier: RotkiNotifier,
            cors_domain_list: Optional[List[str]] = None,
) -> None:
flask_app = Flask(__name__)
if cors_domain_list:
CORS(flask_app, origins=cors_domain_list)
blueprint = create_blueprint(self._api_prefix)
setup_urls(
blueprint=blueprint,
rest_api=rest_api,
urls=URLS_V1,
)
self.rest_api = rest_api
self.rotki_notifier = ws_notifier
self.flask_app = flask_app
self.blueprint = blueprint
self.wsgiserver: Optional[WSGIServer] = None
self.flask_app.register_blueprint(self.blueprint)
self.ws_server: Optional[WebSocketServer] = None
self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)
self.flask_app.register_error_handler(Exception, self.unhandled_exception)
@staticmethod
def unhandled_exception(exception: Exception) -> Response:
""" Flask.errorhandler when an exception wasn't correctly handled """
log.critical(
'Unhandled exception when processing endpoint request',
exc_info=True,
exception=str(exception),
)
return api_response(wrap_in_fail_result(str(exception)), HTTPStatus.INTERNAL_SERVER_ERROR)
def run(self, host: str = '127.0.0.1', port: int = 5042, **kwargs: Any) -> None:
"""This is only used for the data faker and not used in production"""
self.flask_app.run(host=host, port=port, **kwargs)
def start(
self,
host: str = '127.0.0.1',
rest_port: int = 5042,
websockets_port: int = 5043,
) -> None:
"""This is used to start the API server in production"""
wsgi_logger = logging.getLogger(__name__ + '.pywsgi')
self.wsgiserver = WSGIServer(
listener=(host, rest_port),
application=self.flask_app,
log=wsgi_logger,
error_log=wsgi_logger,
)
msg = f'rotki REST API server is running at: {host}:{rest_port}'
print(msg)
log.info(msg)
self.wsgiserver.start()
self.ws_server = WebSocketServer(
listener=(host, websockets_port),
application=WebsocketResource([
('^/', RotkiWSApp),
]),
debug=False,
environ={'rotki_notifier': self.rotki_notifier},
)
msg = f'rotki Websockets API server is running at: {host}:{websockets_port}'
print(msg)
log.info(msg)
self.ws_server.start()
def stop(self, timeout: int = 5) -> None:
"""Stops the API server. If handlers are running after timeout they are killed"""
if self.wsgiserver is not None:
self.wsgiserver.stop(timeout)
self.wsgiserver = None
if self.ws_server is not None:
self.ws_server.stop(timeout)
            self.ws_server = None
self.rest_api.stop()
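# Rough wiring sketch. The construction of RestAPI happens outside this module and is
# assumed here; RotkiNotifier is likewise assumed to need no required constructor
# arguments. Ports mirror the defaults of APIServer.start().
#
#     notifier = RotkiNotifier()
#     server = APIServer(
#         rest_api=rest_api,
#         ws_notifier=notifier,
#         cors_domain_list=['http://localhost:8080'],
#     )
#     server.start(host='127.0.0.1', rest_port=5042, websockets_port=5043)
#     ...
#     server.stop()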
|
src/GridCal/Gui/TowerBuilder/gui.py | SanPen/GridCal | 284 | 11656 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'gui.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from .matplotlibwidget import MatplotlibWidget
from .icons_rc import *
class Ui_Dialog(object):
def setupUi(self, Dialog):
if not Dialog.objectName():
Dialog.setObjectName(u"Dialog")
Dialog.resize(1183, 675)
self.gridLayout = QGridLayout(Dialog)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout.setContentsMargins(1, 1, 1, 1)
self.tabWidget = QTabWidget(Dialog)
self.tabWidget.setObjectName(u"tabWidget")
self.tab_2 = QWidget()
self.tab_2.setObjectName(u"tab_2")
self.verticalLayout_6 = QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName(u"verticalLayout_6")
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.main_splitter = QSplitter(self.tab_2)
self.main_splitter.setObjectName(u"main_splitter")
self.main_splitter.setOrientation(Qt.Horizontal)
self.frame_8 = QFrame(self.main_splitter)
self.frame_8.setObjectName(u"frame_8")
self.frame_8.setFrameShape(QFrame.NoFrame)
self.frame_8.setFrameShadow(QFrame.Raised)
self.verticalLayout_5 = QVBoxLayout(self.frame_8)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.frame_5 = QFrame(self.frame_8)
self.frame_5.setObjectName(u"frame_5")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_5.sizePolicy().hasHeightForWidth())
self.frame_5.setSizePolicy(sizePolicy)
self.frame_5.setFrameShape(QFrame.NoFrame)
self.frame_5.setFrameShadow(QFrame.Raised)
self.horizontalLayout = QHBoxLayout(self.frame_5)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.label_9 = QLabel(self.frame_5)
self.label_9.setObjectName(u"label_9")
self.horizontalLayout.addWidget(self.label_9)
self.name_lineEdit = QLineEdit(self.frame_5)
self.name_lineEdit.setObjectName(u"name_lineEdit")
self.horizontalLayout.addWidget(self.name_lineEdit)
self.verticalLayout_5.addWidget(self.frame_5)
self.frame_6 = QFrame(self.frame_8)
self.frame_6.setObjectName(u"frame_6")
sizePolicy.setHeightForWidth(self.frame_6.sizePolicy().hasHeightForWidth())
self.frame_6.setSizePolicy(sizePolicy)
self.frame_6.setFrameShape(QFrame.NoFrame)
self.frame_6.setFrameShadow(QFrame.Raised)
self.horizontalLayout_3 = QHBoxLayout(self.frame_6)
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer_2)
self.label_8 = QLabel(self.frame_6)
self.label_8.setObjectName(u"label_8")
self.horizontalLayout_3.addWidget(self.label_8)
self.frequency_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.frequency_doubleSpinBox.setObjectName(u"frequency_doubleSpinBox")
self.frequency_doubleSpinBox.setDecimals(0)
self.frequency_doubleSpinBox.setValue(50.000000000000000)
self.horizontalLayout_3.addWidget(self.frequency_doubleSpinBox)
self.label_11 = QLabel(self.frame_6)
self.label_11.setObjectName(u"label_11")
self.horizontalLayout_3.addWidget(self.label_11)
self.rho_doubleSpinBox = QDoubleSpinBox(self.frame_6)
self.rho_doubleSpinBox.setObjectName(u"rho_doubleSpinBox")
self.rho_doubleSpinBox.setMaximum(9999999.000000000000000)
self.rho_doubleSpinBox.setValue(100.000000000000000)
self.horizontalLayout_3.addWidget(self.rho_doubleSpinBox)
self.verticalLayout_5.addWidget(self.frame_6)
self.splitter = QSplitter(self.frame_8)
self.splitter.setObjectName(u"splitter")
self.splitter.setMaximumSize(QSize(16777215, 16777215))
self.splitter.setOrientation(Qt.Vertical)
self.frame_3 = QFrame(self.splitter)
self.frame_3.setObjectName(u"frame_3")
self.frame_3.setFrameShape(QFrame.NoFrame)
self.frame_3.setFrameShadow(QFrame.Raised)
self.verticalLayout_8 = QVBoxLayout(self.frame_3)
self.verticalLayout_8.setObjectName(u"verticalLayout_8")
self.label_12 = QLabel(self.frame_3)
self.label_12.setObjectName(u"label_12")
self.verticalLayout_8.addWidget(self.label_12)
self.wires_tableView = QTableView(self.frame_3)
self.wires_tableView.setObjectName(u"wires_tableView")
self.verticalLayout_8.addWidget(self.wires_tableView)
self.frame_7 = QFrame(self.frame_3)
self.frame_7.setObjectName(u"frame_7")
self.frame_7.setFrameShape(QFrame.StyledPanel)
self.frame_7.setFrameShadow(QFrame.Raised)
self.horizontalLayout_4 = QHBoxLayout(self.frame_7)
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.add_to_tower_pushButton = QPushButton(self.frame_7)
self.add_to_tower_pushButton.setObjectName(u"add_to_tower_pushButton")
icon = QIcon()
icon.addFile(u":/Icons/icons/plus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.add_to_tower_pushButton.setIcon(icon)
self.horizontalLayout_4.addWidget(self.add_to_tower_pushButton)
self.horizontalSpacer_3 = QSpacerItem(990, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(self.horizontalSpacer_3)
self.verticalLayout_8.addWidget(self.frame_7)
self.splitter.addWidget(self.frame_3)
self.frame_4 = QFrame(self.splitter)
self.frame_4.setObjectName(u"frame_4")
self.frame_4.setFrameShape(QFrame.NoFrame)
self.frame_4.setFrameShadow(QFrame.Raised)
self.verticalLayout_4 = QVBoxLayout(self.frame_4)
self.verticalLayout_4.setObjectName(u"verticalLayout_4")
self.verticalLayout_4.setContentsMargins(9, 9, 9, 9)
self.label_10 = QLabel(self.frame_4)
self.label_10.setObjectName(u"label_10")
self.verticalLayout_4.addWidget(self.label_10)
self.tower_tableView = QTableView(self.frame_4)
self.tower_tableView.setObjectName(u"tower_tableView")
self.verticalLayout_4.addWidget(self.tower_tableView)
self.frame = QFrame(self.frame_4)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.NoFrame)
self.frame.setFrameShadow(QFrame.Raised)
self.horizontalLayout_2 = QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.delete_from_tower_pushButton = QPushButton(self.frame)
self.delete_from_tower_pushButton.setObjectName(u"delete_from_tower_pushButton")
icon1 = QIcon()
icon1.addFile(u":/Icons/icons/minus.svg", QSize(), QIcon.Normal, QIcon.Off)
self.delete_from_tower_pushButton.setIcon(icon1)
self.horizontalLayout_2.addWidget(self.delete_from_tower_pushButton)
self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(self.horizontalSpacer)
self.compute_pushButton = QPushButton(self.frame)
self.compute_pushButton.setObjectName(u"compute_pushButton")
icon2 = QIcon()
icon2.addFile(u":/Icons/icons/calc.svg", QSize(), QIcon.Normal, QIcon.Off)
self.compute_pushButton.setIcon(icon2)
self.compute_pushButton.setIconSize(QSize(16, 16))
self.horizontalLayout_2.addWidget(self.compute_pushButton)
self.verticalLayout_4.addWidget(self.frame)
self.splitter.addWidget(self.frame_4)
self.verticalLayout_5.addWidget(self.splitter)
self.main_splitter.addWidget(self.frame_8)
self.PlotFrame = QFrame(self.main_splitter)
self.PlotFrame.setObjectName(u"PlotFrame")
self.PlotFrame.setFrameShape(QFrame.NoFrame)
self.PlotFrame.setFrameShadow(QFrame.Raised)
self.verticalLayout_7 = QVBoxLayout(self.PlotFrame)
self.verticalLayout_7.setObjectName(u"verticalLayout_7")
self.verticalLayout_7.setContentsMargins(9, 9, 9, 9)
self.label_4 = QLabel(self.PlotFrame)
self.label_4.setObjectName(u"label_4")
self.verticalLayout_7.addWidget(self.label_4)
self.plotwidget = MatplotlibWidget(self.PlotFrame)
self.plotwidget.setObjectName(u"plotwidget")
self.verticalLayout_7.addWidget(self.plotwidget)
self.frame_9 = QFrame(self.PlotFrame)
self.frame_9.setObjectName(u"frame_9")
self.frame_9.setMaximumSize(QSize(16777215, 24))
self.frame_9.setFrameShape(QFrame.StyledPanel)
self.frame_9.setFrameShadow(QFrame.Raised)
self.horizontalLayout_5 = QHBoxLayout(self.frame_9)
self.horizontalLayout_5.setObjectName(u"horizontalLayout_5")
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalSpacer_4 = QSpacerItem(19, 19, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(self.horizontalSpacer_4)
self.acceptButton = QPushButton(self.frame_9)
self.acceptButton.setObjectName(u"acceptButton")
self.horizontalLayout_5.addWidget(self.acceptButton)
self.verticalLayout_7.addWidget(self.frame_9)
self.main_splitter.addWidget(self.PlotFrame)
self.verticalLayout_6.addWidget(self.main_splitter)
self.tabWidget.addTab(self.tab_2, "")
self.tab = QWidget()
self.tab.setObjectName(u"tab")
self.verticalLayout_3 = QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.frame_10 = QFrame(self.tab)
self.frame_10.setObjectName(u"frame_10")
self.frame_10.setFrameShape(QFrame.StyledPanel)
self.frame_10.setFrameShadow(QFrame.Raised)
self.gridLayout_2 = QGridLayout(self.frame_10)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.label_2 = QLabel(self.frame_10)
self.label_2.setObjectName(u"label_2")
self.gridLayout_2.addWidget(self.label_2, 0, 1, 1, 1)
self.label_6 = QLabel(self.frame_10)
self.label_6.setObjectName(u"label_6")
self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 1)
self.z_tableView_abcn = QTableView(self.frame_10)
self.z_tableView_abcn.setObjectName(u"z_tableView_abcn")
self.gridLayout_2.addWidget(self.z_tableView_abcn, 1, 0, 1, 1)
self.y_tableView_abcn = QTableView(self.frame_10)
self.y_tableView_abcn.setObjectName(u"y_tableView_abcn")
self.gridLayout_2.addWidget(self.y_tableView_abcn, 1, 1, 1, 1)
self.label_7 = QLabel(self.frame_10)
self.label_7.setObjectName(u"label_7")
self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
self.z_tableView_abc = QTableView(self.frame_10)
self.z_tableView_abc.setObjectName(u"z_tableView_abc")
self.gridLayout_2.addWidget(self.z_tableView_abc, 3, 0, 1, 1)
self.label = QLabel(self.frame_10)
self.label.setObjectName(u"label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.z_tableView_seq = QTableView(self.frame_10)
self.z_tableView_seq.setObjectName(u"z_tableView_seq")
self.gridLayout_2.addWidget(self.z_tableView_seq, 5, 0, 1, 1)
self.label_3 = QLabel(self.frame_10)
self.label_3.setObjectName(u"label_3")
self.gridLayout_2.addWidget(self.label_3, 2, 1, 1, 1)
self.y_tableView_abc = QTableView(self.frame_10)
self.y_tableView_abc.setObjectName(u"y_tableView_abc")
self.gridLayout_2.addWidget(self.y_tableView_abc, 3, 1, 1, 1)
self.label_5 = QLabel(self.frame_10)
self.label_5.setObjectName(u"label_5")
self.gridLayout_2.addWidget(self.label_5, 4, 1, 1, 1)
self.y_tableView_seq = QTableView(self.frame_10)
self.y_tableView_seq.setObjectName(u"y_tableView_seq")
self.gridLayout_2.addWidget(self.y_tableView_seq, 5, 1, 1, 1)
self.verticalLayout_3.addWidget(self.frame_10)
self.tabWidget.addTab(self.tab, "")
self.gridLayout.addWidget(self.tabWidget, 4, 0, 1, 1)
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(Dialog)
# setupUi
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"Tower creation", None))
self.label_9.setText(QCoreApplication.translate("Dialog", u"Name", None))
self.label_8.setText(QCoreApplication.translate("Dialog", u"Frequency (Hz)", None))
        self.label_11.setText(QCoreApplication.translate("Dialog", u"Earth resistivity (Ohm*m)", None))
self.label_12.setText(QCoreApplication.translate("Dialog", u"Wire catalogue", None))
#if QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Add wire", None))
#endif // QT_CONFIG(tooltip)
self.add_to_tower_pushButton.setText("")
        self.label_10.setText(QCoreApplication.translate("Dialog", u"Wire composition", None))
#if QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Delete wire", None))
#endif // QT_CONFIG(tooltip)
self.delete_from_tower_pushButton.setText("")
#if QT_CONFIG(tooltip)
self.compute_pushButton.setToolTip(QCoreApplication.translate("Dialog", u"Compute matrices", None))
#endif // QT_CONFIG(tooltip)
self.compute_pushButton.setText("")
self.label_4.setText(QCoreApplication.translate("Dialog", u"Tower", None))
self.acceptButton.setText(QCoreApplication.translate("Dialog", u"Accept", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QCoreApplication.translate("Dialog", u"Tower designer", None))
self.label_2.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABCN", None))
self.label_6.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABC", None))
self.label_7.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) in sequence components", None))
self.label.setText(QCoreApplication.translate("Dialog", u" Z series (Ohm / km) for ABCN", None))
self.label_3.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for ABC", None))
self.label_5.setText(QCoreApplication.translate("Dialog", u" Y shunt (uS / km) for the sequence components", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QCoreApplication.translate("Dialog", u"Impedance matrices", None))
# retranslateUi
|
tools/isolate/data/isolate/with_flag.py | Scopetta197/chromium | 212 | 11669 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def main():
print 'with_flag: Verify the test data files were mapped properly'
assert len(sys.argv) == 2
mode = sys.argv[1]
assert mode in ('run', 'trace')
files = sorted(os.listdir('files1'))
tree = {
'test_file1.txt': 'Foo\n',
'test_file2.txt': 'Bar\n',
}
# Ignore .svn directory which happens to be there with --mode=trace
# from a svn checkout. The file shouldn't be there when --mode=run is used.
if mode == 'trace' and '.svn' in files:
files.remove('.svn')
if files != sorted(tree):
print '%s != %s' % (files, sorted(tree))
return 2
for k, v in tree.iteritems():
content = open(os.path.join('files1', k), 'rb').read()
if v != content:
print '%s: %r != %r' % (k, v, content)
return 3
root_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir, base = os.path.split(root_dir)
if mode == 'trace':
# Verify the parent directory.
parent_dir, base2 = os.path.split(parent_dir)
if base != 'isolate' or base2 != 'data':
print 'mode trace: Invalid root dir %s' % root_dir
return 4
else:
# Verify that we are not inside a checkout.
if base == 'data':
print 'mode run: Invalid root dir %s' % root_dir
return 5
return 0
if __name__ == '__main__':
sys.exit(main())
|
Lib/gds/burp/config.py | mwielgoszewski/jython-burp-api | 134 | 11694 | <reponame>mwielgoszewski/jython-burp-api
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from ConfigParser import ConfigParser
from copy import deepcopy
from inspect import cleandoc
import os.path
from .core import ExtensionPoint
__all__ = ['Configuration', 'ConfigSection', 'Option', 'BoolOption',
'IntOption', 'FloatOption', 'ListOption',
'OrderedExtensionsOption']
_use_default = object()
def as_bool(value):
"""Convert the given value to a `bool`.
If `value` is a string, return `True` for any of "yes", "true", "enabled",
"on" or non-zero numbers, ignoring case. For non-string arguments, return
the argument converted to a `bool`, or `False` if the conversion fails.
"""
if isinstance(value, basestring):
try:
return bool(float(value))
except ValueError:
return value.strip().lower() in ('yes', 'true', 'enabled', 'on')
try:
return bool(value)
except (TypeError, ValueError):
return False
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text)
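# Example behaviour under the Python 2 runtime this module targets:
#   to_unicode('caf\xc3\xa9')        # -> u'caf\xe9' (decoded as UTF-8)
#   to_unicode('\xe9')               # -> u'\xe9' (UTF-8 fails, latin1 fallback)
#   to_unicode(ValueError(u'boom'))  # -> u'boom'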
def _to_utf8(basestr):
return to_unicode(basestr, 'utf-8').encode('utf-8')
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
def __init__(self, filename, params={}):
self.filename = filename
self.parser = ConfigParser()
self.parser.optionxform = str
self._old_sections = {}
self.parents = []
self._lastmtime = 0
self._sections = {}
self.parser.read(filename)
def __contains__(self, name):
"""Return whether the configuration contains a section of the given
name.
"""
return name in self.sections()
def __getitem__(self, name):
"""Return the configuration section with the specified name."""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def get(self, section, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
return self[section].get(key, default)
def getbool(self, section, key, default=''):
"""Return the specified option as boolean value.
If the value of the option is one of "yes", "true", "enabled", "on",
        or "1", this method will return `True`, otherwise `False`.
Valid default input is a string or a bool. Returns a bool.
"""
return self[section].getbool(key, default)
def getint(self, section, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
return self[section].getint(key, default)
def getfloat(self, section, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
return self[section].getfloat(key, default)
def getlist(self, section, key, default='', sep=',', keep_empty=False):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
Valid default input is a string or a list. Returns a string.
"""
return self[section].getlist(key, default, sep, keep_empty)
def getpath(self, section, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to the location of this
configuration file.
Valid default input is a string. Returns a normalized path.
"""
return self[section].getpath(key, default)
def defaults(self, compmgr=None):
"""Returns a dictionary of the default configuration values
If `compmgr` is specified, return only options declared in components
that are enabled in the given `ComponentManager`.
"""
defaults = {}
for (section, key), option in Option.get_registry(compmgr).items():
defaults.setdefault(section, {})[key] = option.default
return defaults
def options(self, section, compmgr=None):
"""Return a list of `(name, value)` tuples for every option in the
specified section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
return self[section].options(compmgr)
def remove(self, section, key):
"""Remove the specified option."""
self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set([to_unicode(s) for s in self.parser.sections()])
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
burp.ini or one of the parents, or is available through the Option
registry.
"""
section_str = _to_utf8(section)
if self.parser.has_section(section_str):
if _to_utf8(option) in self.parser.options(section_str):
return True
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
return True
return defaults and (section, option) in Option.registry
def parse_if_needed(self, force=False):
if not self.filename or not os.path.isfile(self.filename):
return False
changed = False
modtime = os.path.getmtime(self.filename)
if force or modtime > self._lastmtime:
self._sections = {}
self.parser._sections = {}
if not self.parser.read(self.filename):
                raise IOError("Error reading '%(file)s', make sure it is "
                              "readable." % {'file': self.filename})
self._lastmtime = modtime
self._old_sections = deepcopy(self.parser._sections)
changed = True
if changed:
self.parents = []
if self.parser.has_option('inherit', 'file'):
for filename in self.parser.get('inherit', 'file').split(','):
filename = to_unicode(filename.strip())
if not os.path.isabs(filename):
filename = os.path.join(os.path.dirname(self.filename),
filename)
self.parents.append(Configuration(filename))
else:
for parent in self.parents:
changed |= parent.parse_if_needed(force=force)
if changed:
self._cache = {}
return changed
class Section(object):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', 'overridden', '_cache']
def __init__(self, config, name):
self.config = config
self.name = name
self.overridden = {}
self._cache = {}
def contains(self, key, defaults=True):
if self.config.parser.has_option(_to_utf8(self.name), _to_utf8(key)):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
return defaults and Option.registry.has_key((self.name, key))
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
name_str = _to_utf8(self.name)
if self.config.parser.has_section(name_str):
for option_str in self.config.parser.options(name_str):
option = to_unicode(option_str)
options.add(option.lower())
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = option.lower()
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).keys():
if section == self.name and option.lower() not in options:
yield option
__iter__ = iterate
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
name_str = _to_utf8(self.name)
key_str = _to_utf8(key)
if self.config.parser.has_option(name_str, key_str):
value = self.config.parser.get(name_str, key_str)
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option.default if option else _use_default
else:
value = _use_default
if value is _use_default:
return default
if not value:
value = u''
elif isinstance(value, basestring):
value = to_unicode(value)
self._cache[key] = value
return value
def getbool(self, key, default=''):
"""Return the value of the specified option as boolean.
This method returns `True` if the option value is one of "yes", "true",
"enabled", "on", or non-zero numbers, ignoring case. Otherwise `False`
is returned.
Valid default input is a string or a bool. Returns a bool.
"""
return as_bool(self.get(key, default))
def getint(self, key, default=''):
"""Return the value of the specified option as integer.
Valid default input is a string or an int. Returns an int.
"""
value = self.get(key, default)
if not value:
return 0
return int(value)
def getfloat(self, key, default=''):
"""Return the value of the specified option as float.
Valid default input is a string, float or int. Returns a float.
"""
value = self.get(key, default)
if not value:
return 0.0
return float(value)
def getlist(self, key, default='', sep=',', keep_empty=True):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `False`, empty elements are
omitted from the list.
Valid default input is a string or a list. Returns a list.
"""
value = self.get(key, default)
if not value:
return []
if isinstance(value, basestring):
items = [item.strip() for item in value.split(sep)]
else:
items = list(value)
if not keep_empty:
items = filter(None, items)
return items
def getpath(self, key, default=''):
"""Return the value of the specified option as a path, relative to
the location of this configuration file.
Valid default input is a string. Returns a normalized path.
"""
path = self.get(key, default)
if not path:
return default
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.config.filename), path)
return os.path.normcase(os.path.realpath(path))
def options(self, compmgr=None):
"""Return `(key, value)` tuples for every option in the section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
for key in self.iterate(compmgr):
yield key, self.get(key)
def _get_registry(cls, compmgr=None):
"""Return the descriptor registry.
If `compmgr` is specified, only return descriptors for components that
are enabled in the given `ComponentManager`.
"""
if compmgr is None:
return cls.registry
from .core import ComponentMeta
components = {}
for comp in ComponentMeta._components:
for attr in comp.__dict__.itervalues():
if isinstance(attr, cls):
components[attr] = comp
return dict(each for each in cls.registry.iteritems()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
class ConfigSection(object):
"""Descriptor for configuration sections."""
registry = {}
@staticmethod
def get_registry(compmgr=None):
"""Return the section registry, as a `dict` mapping section names to
`ConfigSection` objects.
If `compmgr` is specified, only return sections for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(ConfigSection, compmgr)
def __init__(self, name, doc, doc_domain='burpini'):
"""Create the configuration section."""
self.name = name
self.registry[self.name] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
return config[self.name]
def __repr__(self):
return '<%s [%s]>' % (self.__class__.__name__, self.name)
class Option(object):
"""Descriptor for configuration options."""
registry = {}
accessor = Section.get
@staticmethod
def get_registry(compmgr=None):
"""Return the option registry, as a `dict` mapping `(section, key)`
tuples to `Option` objects.
If `compmgr` is specified, only return options for components that are
enabled in the given `ComponentManager`.
"""
return _get_registry(Option, compmgr)
def __init__(self, section, name, default=None, doc='',
doc_domain='burpini'):
"""Create the configuration option.
:param section: the name of the configuration section this option
belongs to
:param name: the name of the option
:param default: the default value for the option
:param doc: documentation of the option
"""
self.section = section
self.name = name
self.default = default
self.registry[(self.section, self.name)] = self
self.__doc__ = cleandoc(doc)
self.doc_domain = doc_domain
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
section = config[self.section]
value = self.accessor(section, self.name, self.default)
return value
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __repr__(self):
return '<%s [%s] "%s">' % (self.__class__.__name__, self.section,
self.name)
class BoolOption(Option):
"""Descriptor for boolean configuration options."""
accessor = Section.getbool
class IntOption(Option):
"""Descriptor for integer configuration options."""
accessor = Section.getint
class FloatOption(Option):
"""Descriptor for float configuration options."""
accessor = Section.getfloat
class ListOption(Option):
"""Descriptor for configuration options that contain multiple values
separated by a specific character.
"""
def __init__(self, section, name, default=None, sep=',', keep_empty=False,
doc='', doc_domain='burpini'):
Option.__init__(self, section, name, default, doc, doc_domain)
self.sep = sep
self.keep_empty = keep_empty
def accessor(self, section, name, default):
return section.getlist(name, default, self.sep, self.keep_empty)
class OrderedExtensionsOption(ListOption):
"""A comma separated, ordered, list of components implementing `interface`.
Can be empty.
If `include_missing` is true (the default) all components implementing
the interface are returned, with those specified by the option ordered
first."""
def __init__(self, section, name, interface, default=None,
include_missing=True, doc='', doc_domain='burpini'):
ListOption.__init__(self, section, name, default, doc=doc,
doc_domain=doc_domain)
self.xtnpt = ExtensionPoint(interface)
self.include_missing = include_missing
def __get__(self, instance, owner):
if instance is None:
return self
order = ListOption.__get__(self, instance, owner)
components = []
for impl in self.xtnpt.extensions(instance):
if self.include_missing or impl.__class__.__name__ in order:
components.append(impl)
def compare(x, y):
x, y = x.__class__.__name__, y.__class__.__name__
if x not in order:
return int(y in order)
if y not in order:
return -int(x in order)
return cmp(order.index(x), order.index(y))
components.sort(compare)
return components
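# Minimal usage sketch. The section/option names below are made up for illustration and
# the import path is only inferred from this file's location in the package:
#
#     from gds.burp.config import Configuration, IntOption, BoolOption
#
#     class SpiderConfig(object):
#         max_depth = IntOption('spider', 'max_depth', '3', doc="Maximum crawl depth")
#         follow_redirects = BoolOption('spider', 'follow_redirects', 'true')
#         def __init__(self, config):
#             self.config = config  # the descriptors resolve values through self.config
#
#     spider = SpiderConfig(Configuration('burp.ini'))
#     depth = spider.max_depth  # -> 3 unless overridden in the [spider] section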
|
regtests/webclgl/call_external_method.py | bpmbank/PythonJS | 319 | 11706 | <filename>regtests/webclgl/call_external_method.py
"""external method"""
class myclass:
def __init__(self, i):
self.index = i
def get_index(self):
return self.index
def run(self, n):
self.intarray = new(Int16Array(n))
self.intarray[ self.index ] = 99
@returns( array=n )
@gpu.main
def gpufunc():
int* A = self.intarray
## GLSL compile error: `Index expression must be constant`
#int idx = self.get_index()
#return float( A[idx] )
return float( A[self.get_index()] )
return gpufunc()
def main():
m = myclass(10)
r = m.run(64)
print(r)
TestError( int(r[10])==99 ) |
tests/test_loop_seer.py | Kyle-Kyle/angr | 6,132 | 11723 | <gh_stars>1000+
import os
import sys
import angr
import nose.tools
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_various_loops():
p = angr.Project(os.path.join(test_location, 'x86_64', 'various_loops'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
dummy = p.loader.main_object.get_symbol('dummy')
bvs = state.solver.BVS(dummy.name, 8 * dummy.size)
state.memory.store(dummy.rebased_addr, bvs, endness='Iend_LE')
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions=None, bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended), 10)
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 14)
for i, d in enumerate(simgr.deadended):
f = p.kb.functions.function(name='symbolic_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], i)
f = p.kb.functions.function(name='for_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='do_while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='nullify')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[l.entry.addr]), 8)
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='nested_for_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.back_edge_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='nested_while_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.back_edge_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.back_edge_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='nested_do_while_loop')
ol = p.analyses.LoopFinder(functions=[f]).loops[0]
il = ol.subloops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[ol.entry.addr][0], 3)
nose.tools.assert_equal(len(d.loop_data.header_trip_counts[il.entry.addr]), 3)
nose.tools.assert_true(all(s == 3 for s in d.loop_data.header_trip_counts[il.entry.addr]))
f = p.kb.functions.function(name='break_for_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.back_edge_trip_counts[l.entry.addr][0], 9)
f = p.kb.functions.function(name='break_do_while_loop')
l = p.analyses.LoopFinder(functions=[f]).loops[0]
nose.tools.assert_equal(d.loop_data.header_trip_counts[l.entry.addr][0], 9)
def test_loops_with_invalid_parameter():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_loops'), auto_load_libs=False)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(functions=['main', 0x1234], bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 3)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665][0], 10)
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400665]), 10)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400675][0], 10)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4006b2][0], 100)
def test_arrays():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_arrays'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=None))
simgr.run()
nose.tools.assert_equal(len(simgr.deadended[0].loop_data.back_edge_trip_counts), 2)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x400636][0], 26)
nose.tools.assert_equal(simgr.deadended[0].loop_data.back_edge_trip_counts[0x4005fd][0], 26)
def test_loop_limiter():
p = angr.Project(os.path.join(test_location, 'x86_64', 'test_arrays'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
state.register_plugin('loop_data', angr.state_plugins.SimStateLoopData())
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=5))
simgr.run()
nose.tools.assert_true('spinning' in simgr.stashes)
nose.tools.assert_equal(simgr.spinning[0].loop_data.back_edge_trip_counts[0x4005fd][0], 6)
def test_loop_limiter_constant_loop():
p = angr.Project(os.path.join(test_location, 'x86_64', 'constant_loopseer'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
state = p.factory.entry_state()
simgr = p.factory.simulation_manager(state)
simgr.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, functions='main', bound=5, limit_concrete_loops=False))
simgr.run()
nose.tools.assert_true(simgr.deadended[0].regs.eax.concrete)
val = simgr.deadended[0].solver.eval_one(simgr.deadended[0].regs.eax)
nose.tools.assert_equal(val, 420)
if __name__ == "__main__":
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
g = globals().copy()
for k, v in g.items():
if k.startswith("test_") and hasattr(v, '__call__'):
print(k)
v()
|
968 Binary Tree Cameras.py | krishna13052001/LeetCode | 872 | 11751 | #!/usr/bin/python3
"""
Given a binary tree, we install cameras on the nodes of the tree.
Each camera at a node can monitor its parent, itself, and its immediate children.
Calculate the minimum number of cameras needed to monitor all nodes of the tree.
Example 1:
Input: [0,0,null,0,0]
Output: 1
Explanation: One camera is enough to monitor all nodes if placed as shown.
Example 2:
Input: [0,0,null,0,null,0,null,null,0]
Output: 2
Explanation: At least two cameras are needed to monitor all nodes of the tree.
The above image shows one of the valid configurations of camera placement.
Note:
The number of nodes in the given tree will be in the range [1, 1000].
Every node has value 0.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.covered = {None}
self.cnt = 0
def minCameraCover(self, root: TreeNode) -> int:
"""
Greedy?
Bottom up, cover leaf's parent is strictly better than cover leaf
"""
self.dfs(root, None)
if root not in self.covered:
self.covered.add(root)
self.cnt += 1
return self.cnt
def dfs(self, node, pi):
"""
post order
rely on the parents to cover it
"""
if not node:
return
self.dfs(node.left, node)
self.dfs(node.right, node)
if node.left not in self.covered or node.right not in self.covered:
self.cnt += 1
self.covered.add(node.left)
self.covered.add(node.right)
self.covered.add(node)
self.covered.add(pi)
class SolutionError:
def __init__(self):
self.covered = set()
def minCameraCover(self, root: TreeNode) -> int:
"""
Greedy?
Top-down, no good.
Bottom up, cover leaf's parent is strictly better than cover leaf
"""
dummy = TreeNode(0)
dummy.left = root
self.dfs(root, dummy)
self.covered.discard(dummy) # swallow KeyError
return len(self.covered)
def dfs(self, node, pi):
"""
post order
"""
if not node:
return
self.dfs(node.left, node)
self.dfs(node.right, node)
        # post order
if (
(not node.left or node.left in self.covered) and
(not node.right or node.right in self.covered)
):
self.covered.add(pi)
return
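# Quick sanity check against Example 1 ([0,0,null,0,0]): a single camera on the root's
# left child covers every node, so the expected answer is 1.
if __name__ == "__main__":
    root = TreeNode(0)
    root.left = TreeNode(0)
    root.left.left = TreeNode(0)
    root.left.right = TreeNode(0)
    print(Solution().minCameraCover(root))  # expected: 1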
|
examples/get_tiktoks_by_sound.py | twitter-79/TikTok-Api | 2,095 | 11752 | <gh_stars>1000+
from TikTokApi import TikTokApi
api = TikTokApi.get_instance()
count = 30
# You can get a sound id from any TikTok returned by the other getter methods, or find songs via the discoverMusic method.
sound_id = "6601861313180207878"
tiktoks = api.by_sound(sound_id, count=count)
for tiktok in tiktoks:
print(tiktok)
|
robo_gym/envs/ur/ur_avoidance_basic.py | psFournier/robo-gym | 236 | 11777 | """
Environment for basic obstacle avoidance controlling a robotic arm from UR.
In this environment the obstacle moves only up and down along a vertical line in front of the robot.
The goal is for the robot to keep at least a predefined minimum distance to the moving obstacle.
When feasible the robot should hold its original configuration,
otherwise it should wait for the obstacle to move away before returning to it.
"""
import numpy as np
from typing import Tuple
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_avoidance_env import URBaseAvoidanceEnv
# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [-1.57, -1.31, -1.31, -2.18, 1.57, 0.0]
DEBUG = True
MINIMUM_DISTANCE = 0.3 # the minimum distance [m] the robot should keep to the obstacle
class BasicAvoidanceUR(URBaseAvoidanceEnv):
"""Universal Robots UR basic obstacle avoidance environment.
Args:
rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None.
        fix_base (bool): Whether or not the base joint stays fixed or is movable. Defaults to False.
        fix_shoulder (bool): Whether or not the shoulder joint stays fixed or is movable. Defaults to False.
        fix_elbow (bool): Whether or not the elbow joint stays fixed or is movable. Defaults to False.
        fix_wrist_1 (bool): Whether or not the wrist 1 joint stays fixed or is movable. Defaults to False.
        fix_wrist_2 (bool): Whether or not the wrist 2 joint stays fixed or is movable. Defaults to False.
        fix_wrist_3 (bool): Whether or not the wrist 3 joint stays fixed or is movable. Defaults to True.
        ur_model (str): determines which ur model will be used in the environment. Defaults to 'ur5'.
        include_polar_to_elbow (bool): determines whether or not the polar coordinates to the elbow joint are included in the state. Defaults to False.
Attributes:
ur (:obj:): Robot utilities object.
client (:obj:str): Robot Server client.
real_robot (bool): True if the environment is controlling a real robot.
"""
max_episode_steps = 1000
def _set_initial_robot_server_state(self, rs_state, fixed_object_position = None) -> robot_server_pb2.State:
if fixed_object_position:
state_msg = super()._set_initial_robot_server_state(rs_state=rs_state, fixed_object_position=fixed_object_position)
return state_msg
z_amplitude = np.random.default_rng().uniform(low=0.09, high=0.35)
z_frequency = 0.125
z_offset = np.random.default_rng().uniform(low=0.2, high=0.6)
string_params = {"object_0_function": "triangle_wave"}
float_params = {"object_0_x": 0.12,
"object_0_y": 0.34,
"object_0_z_amplitude": z_amplitude,
"object_0_z_frequency": z_frequency,
"object_0_z_offset": z_offset}
state = {}
state_msg = robot_server_pb2.State(state = state, float_params = float_params,
string_params = string_params, state_dict = rs_state)
return state_msg
def reset(self, joint_positions = JOINT_POSITIONS, fixed_object_position = None) -> np.array:
"""Environment reset.
Args:
joint_positions (list[6] or np.array[6]): robot joint positions in radians.
fixed_object_position (list[3]): x,y,z fixed position of object
"""
self.prev_action = np.zeros(6)
state = super().reset(joint_positions = joint_positions, fixed_object_position = fixed_object_position)
return state
def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
env_state = self._robot_server_state_to_env_state(rs_state)
reward = 0
done = False
info = {}
# Reward weights
close_distance_weight = -2
delta_joint_weight = 1
action_usage_weight = 1
rapid_action_weight = -0.2
# Difference in joint position current vs. starting position
delta_joint_pos = env_state[9:15]
# Calculate distance to the obstacle
obstacle_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
forearm_coord = np.array([rs_state['forearm_to_ref_translation_x'], rs_state['forearm_to_ref_translation_y'], rs_state['forearm_to_ref_translation_z']])
distance_to_ee = np.linalg.norm(obstacle_coord - ee_coord)
distance_to_forearm = np.linalg.norm(obstacle_coord - forearm_coord)
distance_to_target = np.min([distance_to_ee, distance_to_forearm])
# Reward staying close to the predefined joint position
if abs(env_state[-6:]).sum() < 0.1 * action.size:
reward += delta_joint_weight * (1 - (abs(delta_joint_pos).sum()/(0.1 * action.size))) * (1/1000)
# Reward for not acting
if abs(action).sum() <= action.size:
reward += action_usage_weight * (1 - (np.square(action).sum()/action.size)) * (1/1000)
        # Negative reward if actions change too rapidly between steps
for i in range(len(action)):
if abs(action[i] - self.prev_action[i]) > 0.5:
reward += rapid_action_weight * (1/1000)
        # Negative reward if the obstacle is closer than the predefined minimum distance
if distance_to_target < MINIMUM_DISTANCE:
reward += close_distance_weight * (1/self.max_episode_steps)
# Check if there is a collision
collision = True if rs_state['in_collision'] == 1 else False
if collision:
done = True
info['final_status'] = 'collision'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
if self.elapsed_steps >= self.max_episode_steps:
done = True
info['final_status'] = 'success'
info['target_coord'] = obstacle_coord
self.last_position_on_success = []
return reward, done, info
def step(self, action) -> Tuple[np.array, float, bool, dict]:
if type(action) == list: action = np.array(action)
state, reward, done, info = super().step(action)
self.prev_action = self.add_fixed_joints(action)
return state, reward, done, info
class BasicAvoidanceURSim(BasicAvoidanceUR, Simulation):
cmd = "roslaunch ur_robot_server ur_robot_server.launch \
world_name:=tabletop_sphere50.world \
reference_frame:=base_link \
max_velocity_scale_factor:=0.2 \
action_cycle_rate:=20 \
rviz_gui:=false \
gazebo_gui:=true \
objects_controller:=true \
rs_mode:=1moving2points \
n_objects:=1.0 \
object_0_model_name:=sphere50 \
object_0_frame:=target"
def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs):
self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model
Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs)
BasicAvoidanceUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs)
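# --- Illustrative usage sketch (not part of the original robo-gym module) ---
# A minimal random-action episode with the simulated environment defined above.
# The server IP and episode length are placeholders; this assumes a running
# robo-gym robot-server setup reachable from this machine.
#
# env = BasicAvoidanceURSim(ip='192.168.12.20', ur_model='ur5')
# state = env.reset()
# for _ in range(200):
#     state, reward, done, info = env.step(env.action_space.sample())
#     if done:
#         state = env.reset()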
class BasicAvoidanceURRob(BasicAvoidanceUR):
real_robot = True
# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 rs_mode:=moving |
test/programytest/storage/entities/test_nodes.py | cdoebler1/AIML2 | 345 | 11785 | import unittest
import unittest.mock
from programy.storage.entities.nodes import NodesStore
class NodesStoreTest(unittest.TestCase):
def test_load(self):
store = NodesStore()
with self.assertRaises(NotImplementedError):
collector = unittest.mock.Mock()
store.load(collector)
|
Python/Tests/TestData/TestDiscoverer/ConfigUnittest/Product/prefix_not_included.py | techkey/PTVS | 404 | 11788 | import unittest
class PrefixNotIncluded(unittest.TestCase):
def test_not_included(self):
pass
if __name__ == '__main__':
unittest.main()
|
demo/examples/stability/advection_d2q4.py | bgraille/pylbm | 106 | 11805 |
"""
Stability analysis of the
D2Q4 solver for the advection equation
d_t(u) + c_x d_x(u) + c_y d_y(u) = 0
"""
import sympy as sp
import pylbm
# pylint: disable=invalid-name
# symbolic variables
U, X, Y = sp.symbols('U, X, Y')
# symbolic parameters
LA, CX, CY = sp.symbols('lambda, cx, cy', constants=True)
S_1, S_2 = sp.symbols('s1, s2', constants=True)
# numerical parameters
la = 1. # velocity of the scheme
s_1, s_2 = 2., 1. # relaxation parameters
c_x, c_y = 0.5, 0.25 # velocity of the advection equation
dico = {
'dim': 2,
'scheme_velocity': LA,
'schemes': [
{
'velocities': [1, 2, 3, 4],
'conserved_moments': U,
'polynomials': [1, X, Y, X**2-Y**2],
'relaxation_parameters': [0, S_1, S_1, S_2],
'equilibrium': [
U,
CX*U, CY*U,
(CX**2-CY**2)*U
],
},
],
'parameters': {
LA: la,
S_1: s_1,
S_2: s_2,
CX: c_x,
CY: c_y,
},
'relative_velocity': [CX, CY],
}
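# Illustrative note (not part of the original example): the four moments
# (1, X, Y, X**2 - Y**2) are relaxed towards the equilibria listed above, i.e.
#   m0 = u (conserved), m1_eq = c_x * u, m2_eq = c_y * u,
#   m3_eq = (c_x**2 - c_y**2) * u,
# with the first-order moments relaxed at rate s1 and the last moment at rate s2.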
scheme = pylbm.Scheme(dico)
stab = pylbm.Stability(scheme)
stab.visualize({
'parameters': {
CX: {
'range': [0, 1],
'init': c_x,
'step': 0.01,
},
CY: {
'range': [0, 1],
'init': c_y,
'step': 0.01,
},
S_1: {
'name': r"$s_1$",
'range': [0, 2],
'init': s_1,
'step': 0.01,
},
S_2: {
'name': r"$s_2$",
'range': [0, 2],
'init': s_2,
'step': 0.01,
},
},
'number_of_wave_vectors': 4096,
})
|
mmtbx/validation/regression/tst_restraints.py | dperl-sol/cctbx_project | 155 | 11813 | <reponame>dperl-sol/cctbx_project<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from libtbx.utils import null_out
from libtbx import easy_pickle
from six.moves import cStringIO as StringIO
def run_validation(pdb_file, ignore_hd=True):
from mmtbx.validation import restraints
import mmtbx.command_line
cmdline = mmtbx.command_line.load_model_and_data(
args=[pdb_file],
master_phil=mmtbx.command_line.generic_simple_input_phil(),
process_pdb_file=True,
require_data=False,
out=null_out())
validation = restraints.combined(
pdb_hierarchy=cmdline.pdb_hierarchy,
xray_structure=cmdline.xray_structure,
geometry_restraints_manager=cmdline.geometry,
ignore_hd=ignore_hd)
return validation
def exercise_simple():
# extracted from 1lyz, with hydrogens from reduce
pdb_in = """
ATOM 1 N LYS A 1 3.296 9.888 10.739 1.00 7.00 N
ATOM 2 CA LYS A 1 2.439 10.217 9.791 1.00 6.00 C
ATOM 3 C LYS A 1 2.439 11.997 9.160 1.00 6.00 C
ATOM 4 O LYS A 1 2.637 12.656 10.107 1.00 8.00 O
ATOM 5 CB LYS A 1 0.659 10.086 8.844 1.00 6.00 C
ATOM 6 CG LYS A 1 0.198 10.415 8.086 1.00 6.00 C
ATOM 7 CD LYS A 1 -1.187 10.086 8.212 1.00 6.00 C
ATOM 8 CE LYS A 1 -2.175 10.086 7.264 1.00 6.00 C
ATOM 9 NZ LYS A 1 -3.527 9.869 7.288 1.00 7.00 N
ATOM 0 H1 LYS A 1 3.156 9.045 10.986 1.00 7.00 H
ATOM 0 H2 LYS A 1 4.127 9.972 10.431 1.00 7.00 H
ATOM 0 H3 LYS A 1 3.184 10.425 11.440 1.00 7.00 H
ATOM 0 HA LYS A 1 2.772 9.314 9.912 1.00 6.00 H
ATOM 0 HB2 LYS A 1 0.584 9.128 8.712 1.00 6.00 H
ATOM 0 HB3 LYS A 1 0.046 10.323 9.557 1.00 6.00 H
ATOM 0 HG2 LYS A 1 0.310 11.376 8.015 1.00 6.00 H
ATOM 0 HG3 LYS A 1 0.563 10.027 7.276 1.00 6.00 H
ATOM 0 HD2 LYS A 1 -1.193 9.186 8.573 1.00 6.00 H
ATOM 0 HD3 LYS A 1 -1.516 10.674 8.910 1.00 6.00 H
ATOM 0 HE2 LYS A 1 -2.097 10.964 6.860 1.00 6.00 H
ATOM 0 HE3 LYS A 1 -1.857 9.444 6.610 1.00 6.00 H
ATOM 0 HZ1 LYS A 1 -3.725 9.170 6.774 1.00 7.00 H
ATOM 0 HZ2 LYS A 1 -3.787 9.706 8.123 1.00 7.00 H
ATOM 0 HZ3 LYS A 1 -3.949 10.590 6.982 1.00 7.00 H
ATOM 10 N VAL A 2 2.637 12.722 7.707 1.00 7.00 N
ATOM 11 CA VAL A 2 2.307 14.172 7.580 1.00 6.00 C
ATOM 12 C VAL A 2 0.857 14.041 6.949 1.00 6.00 C
ATOM 13 O VAL A 2 0.659 13.843 5.875 1.00 8.00 O
ATOM 14 CB VAL A 2 3.625 14.172 6.759 1.00 6.00 C
ATOM 15 CG1 VAL A 2 3.494 15.491 6.317 1.00 6.00 C
ATOM 16 CG2 VAL A 2 4.746 13.843 7.580 1.00 6.00 C
ATOM 0 H VAL A 2 2.920 12.338 6.992 1.00 7.00 H
ATOM 0 HA VAL A 2 2.195 14.925 8.181 1.00 6.00 H
ATOM 0 HB VAL A 2 3.767 13.528 6.048 1.00 6.00 H
ATOM 0 HG11 VAL A 2 4.250 15.721 5.755 1.00 6.00 H
ATOM 0 HG12 VAL A 2 2.674 15.582 5.808 1.00 6.00 H
ATOM 0 HG13 VAL A 2 3.467 16.087 7.081 1.00 6.00 H
ATOM 0 HG21 VAL A 2 5.554 13.850 7.043 1.00 6.00 H
ATOM 0 HG22 VAL A 2 4.827 14.495 8.294 1.00 6.00 H
ATOM 0 HG23 VAL A 2 4.620 12.960 7.962 1.00 6.00 H
END
"""
pdb_file = "tst_validate_restraints_simple.pdb"
open(pdb_file, "w").write(pdb_in)
v1 = run_validation(pdb_file, ignore_hd=True)
out1 = StringIO()
v1.show(out=out1)
assert ("""
----------Chiral volumes----------
atoms ideal model delta sigma residual deviation
A 1 LYS CA
A 1 LYS N
A 1 LYS C
A 1 LYS CB 2.57 1.12 1.45 2.00e-01 5.25e+01 7.2*sigma
""" in "\n".join([ l.rstrip() for l in out1.getvalue().splitlines() ]))
s = easy_pickle.dumps(v1)
v1p = easy_pickle.loads(s)
out1p = StringIO()
v1p.show(out=out1p)
assert (out1.getvalue() == out1p.getvalue())
v2 = run_validation(pdb_file, ignore_hd=False)
out2 = StringIO()
v2.show(out=out2)
assert (out2.getvalue() != out1.getvalue())
assert ("""\
A 1 LYS HA 110.00 57.00 53.00 3.00e+00 3.12e+02 17.7*sigma
A 1 LYS N
A 1 LYS CA
""" in "\n".join([ l.rstrip() for l in out2.getvalue().splitlines() ]))
#
# C-alpha-only model (from 3b5d)
pdb_raw = """\
CRYST1 115.100 43.700 76.400 90.00 108.10 90.00 C 1 2 1 8
ATOM 1 CA TYR A 6 -7.551 -11.355 -17.946 1.00148.04 C
ATOM 2 CA LEU A 7 -8.052 -8.804 -20.730 1.00310.75 C
ATOM 3 CA GLY A 8 -10.874 -6.691 -19.353 1.00158.95 C
ATOM 4 CA GLY A 9 -9.359 -7.332 -15.966 1.00217.68 C
ATOM 5 CA ALA A 10 -5.806 -6.508 -16.946 1.00239.12 C
ATOM 6 CA ILE A 11 -7.024 -3.514 -18.905 1.00103.16 C
ATOM 7 CA LEU A 12 -10.023 -2.071 -17.056 1.00230.80 C
ATOM 8 CA ALA A 13 -7.313 -1.820 -14.420 1.00141.04 C
"""
pdb_file = "tst_validate_restraints_calpha.pdb"
open(pdb_file, "w").write(pdb_raw)
v1 = run_validation(pdb_file, ignore_hd=True)
if (__name__ == "__main__"):
exercise_simple()
print("OK")
|
get_ip_list_ru_gov.py | gil9red/SimplePyScripts | 117 | 11827 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
The script prints the list of IP addresses of government organizations.
"""
import ipaddress
import sys
import requests
rs = requests.get('https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')
# Check that the request succeeded and returned the expected data
if not rs or not rs.json() or 'ranges' not in rs.json():
    print('Failed to fetch the list of IP addresses of government organizations')
sys.exit()
# Get the items and sort them by organization name
items = sorted(rs.json()['ranges'].items(), key=lambda x: x[0])
ip_counter = 0
for i, (name, ip_network_list) in enumerate(items, 1):
print(f'{i}. {name}')
    # Get the IP ranges with subnet masks
for ip_network in ip_network_list:
print(f' {ip_network}:')
        # Get the IP subnet
net4 = ipaddress.ip_network(ip_network)
        # Iterate over the IP addresses of the given organization
for ip in net4.hosts():
print(f' {ip}')
ip_counter += 1
print()
print('Total IPs:', ip_counter)
|
tools/docs/generate_api_rst.py | dcillera/envoy | 17,703 | 11838 | import os
import shutil
import sys
import tarfile
def include_package(envoy_api_protos, rst_file_path, prefix):
# `envoy_api_rst_files` is a list of file paths for .proto.rst files
# generated by protodoc
#
# we are only interested in the proto files generated for envoy protos,
# not for non-envoy dependencies
if ("pkg/" + prefix) not in rst_file_path:
return None
# derive the "canonical" path from the filepath
canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}"
# we are only interested in the actual v3 protos, not their dependencies
if (prefix + canonical) not in envoy_api_protos:
return None
return canonical
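# Illustrative example (not from the original source): for
#   rst_file_path = "bazel-out/.../pkg/envoy/watchdog/v3/abort_action.proto.rst"
#   prefix = "envoy/"
# include_package() returns "watchdog/v3/abort_action.proto.rst", provided
# "envoy/watchdog/v3/abort_action.proto.rst" is listed in envoy_api_protos.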
def main():
proto_srcs = sys.argv[1]
envoy_api_rst_files = sys.argv[1:-1]
output_filename = sys.argv[-1]
with open(proto_srcs) as f:
# the contents of `proto_srcs` are the result of a bazel genquery,
# containing bazel target rules, eg:
#
# @envoy_api//envoy/watchdog/v3:abort_action.proto
#
# this transforms them to a list with a "canonical" form of:
#
# envoy/watchdog/v3/abort_action.proto.rst
#
envoy_api_protos = [
f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src
]
for rst_file_path in envoy_api_rst_files:
canonical = include_package(envoy_api_protos, rst_file_path, "envoy/")
if canonical is None:
canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/")
if canonical is None:
continue
target = os.path.join("rst-out/api-v3", canonical)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(rst_file_path, target)
# output the generated rst files to a tarfile for consumption
# by other bazel rules
with tarfile.open(output_filename, "w") as tar:
tar.add("rst-out", arcname=".")
if __name__ == "__main__":
main()
|
tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 1,570 | 11866 | from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
|
common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | 123 | 11884 | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
class Command(stack.commands.set.firmware.command):
"""
Associates a firmware implementation with one or more models.
<arg type='string' name='models'>
One or more models to associate the implementation with.
</arg>
<param type='string' name='imp'>
The name of the implementation to associate with the provided models.
</param>
<param type='string' name='make'>
The make of the models.
</param>
<example cmd="set firmware model imp m7800 m6036 imp=mellanox_6xxx_7xxx make=mellanox">
Sets the mellanox_6xxx_7xxx implementation as the one to run for the models m7800 and m6036 for make mellanox.
</example>
"""
def run(self, params, args):
self.runPlugins(args = (params, args))
|
GasBotty/models/utils.py | GreenCUBIC/GasBotty | 353 | 11891 | <reponame>GreenCUBIC/GasBotty
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
|
venv/Lib/site-packages/nipype/conftest.py | richung99/digitizePlots | 585 | 11907 | <gh_stars>100-1000
import os
import shutil
from tempfile import mkdtemp
import pytest
import numpy
import py.path as pp
NIPYPE_DATADIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), "testing/data")
)
temp_folder = mkdtemp()
data_dir = os.path.join(temp_folder, "data")
shutil.copytree(NIPYPE_DATADIR, data_dir)
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
doctest_namespace["os"] = os
doctest_namespace["pytest"] = pytest
doctest_namespace["datadir"] = data_dir
@pytest.fixture(autouse=True)
def _docdir(request):
"""Grabbed from https://stackoverflow.com/a/46991331"""
# Trigger ONLY for the doctests.
doctest_plugin = request.config.pluginmanager.getplugin("doctest")
if isinstance(request.node, doctest_plugin.DoctestItem):
# Get the fixture dynamically by its name.
tmpdir = pp.local(data_dir)
# Chdir only for the duration of the test.
with tmpdir.as_cwd():
yield
else:
# For normal tests, we have to yield, since this is a yield-fixture.
yield
def pytest_unconfigure(config):
# Delete temp folder after session is finished
shutil.rmtree(temp_folder)
|
tools/SDKTool/src/ui/dialog/progress_bar_dialog.py | Passer-D/GameAISDK | 1,210 | 11963 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QProgressDialog
class ProgressBarDialog(QWidget):
def __init__(self, title='', label='', minValue=0, maxValue=100, parent=None):
super(ProgressBarDialog, self).__init__(parent)
self.process_bar = QProgressDialog(self)
self.set_bar_window_title(title)
self.set_label_text(label)
self.set_min_value(minValue)
self.set_max_value(maxValue)
self.process_bar.setWindowModality(Qt.WindowModal)
self.setGeometry(800, 300, 580, 570)
self.process_bar.canceled.connect(self.close_bar)
def set_bar_window_title(self, text):
self.process_bar.setWindowTitle(text)
self.setWindowTitle(text)
def set_label_text(self, text):
self.process_bar.setLabelText(text)
def set_min_value(self, minValue):
self.process_bar.setMinimum(minValue)
def set_max_value(self, maxvalue):
self.process_bar.setMaximum(maxvalue)
def set_value(self, value):
self.process_bar.setValue(value)
def close_bar(self):
self.process_bar.close()
def reset_bar(self):
self.process_bar = None
def show(self):
self.process_bar.show()
def is_valid(self):
return bool(self.process_bar)
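# --- Illustrative standalone demo (not part of the original SDKTool code) ---
# Drives the dialog from 0 to 100 with a timer; it assumes only a local PyQt5
# install, and everything else below is made up for the example.
if __name__ == '__main__':
    import sys
    from PyQt5.QtCore import QTimer
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    bar = ProgressBarDialog(title='Demo', label='Working...', minValue=0, maxValue=100)
    bar.show()

    progress = {'value': 0}

    def tick():
        # advance the bar by one step until it is full, then quit
        progress['value'] += 1
        bar.set_value(progress['value'])
        if progress['value'] >= 100:
            app.quit()

    timer = QTimer()
    timer.timeout.connect(tick)
    timer.start(50)
    sys.exit(app.exec_())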
|
src/oci/dns/models/external_master.py | Manny27nyc/oci-python-sdk | 249 | 11991 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExternalMaster(object):
"""
An external master name server used as the source of zone data.
"""
def __init__(self, **kwargs):
"""
Initializes a new ExternalMaster object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param address:
The value to assign to the address property of this ExternalMaster.
:type address: str
:param port:
The value to assign to the port property of this ExternalMaster.
:type port: int
:param tsig_key_id:
The value to assign to the tsig_key_id property of this ExternalMaster.
:type tsig_key_id: str
"""
self.swagger_types = {
'address': 'str',
'port': 'int',
'tsig_key_id': 'str'
}
self.attribute_map = {
'address': 'address',
'port': 'port',
'tsig_key_id': 'tsigKeyId'
}
self._address = None
self._port = None
self._tsig_key_id = None
@property
def address(self):
"""
**[Required]** Gets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:return: The address of this ExternalMaster.
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""
Sets the address of this ExternalMaster.
The server's IP address (IPv4 or IPv6).
:param address: The address of this ExternalMaster.
:type: str
"""
self._address = address
@property
def port(self):
"""
Gets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:return: The port of this ExternalMaster.
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this ExternalMaster.
The server's port. Port value must be a value of 53, otherwise omit
the port value.
:param port: The port of this ExternalMaster.
:type: int
"""
self._port = port
@property
def tsig_key_id(self):
"""
Gets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:return: The tsig_key_id of this ExternalMaster.
:rtype: str
"""
return self._tsig_key_id
@tsig_key_id.setter
def tsig_key_id(self, tsig_key_id):
"""
Sets the tsig_key_id of this ExternalMaster.
The OCID of the TSIG key.
:param tsig_key_id: The tsig_key_id of this ExternalMaster.
:type: str
"""
self._tsig_key_id = tsig_key_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
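# --- Illustrative usage sketch (not part of the generated SDK model) ---
# The address, port and TSIG key OCID below are placeholder values.
#
# master = ExternalMaster(
#     address='192.0.2.10',
#     port=53,
#     tsig_key_id='ocid1.tsigkey.oc1..exampleuniqueID')
# print(master)  # formatted_flat_dict() dump of the populated fields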
|
scripts/build_folding_map.py | tsieprawski/md4c | 475 | 11995 | #!/usr/bin/env python3
import os
import sys
import textwrap
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")
status_list = [ "C", "F" ]
folding_list = [ dict(), dict(), dict() ]
# Filter the foldings for "full" folding.
for line in f:
comment_off = line.find("#")
if comment_off >= 0:
line = line[:comment_off]
line = line.strip()
if not line:
continue
raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
if not status.strip() in status_list:
continue
codepoint = int(raw_codepoint.strip(), 16)
mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
mapping_len = len(mapping)
if mapping_len in range(1, 4):
folding_list[mapping_len-1][codepoint] = mapping
else:
assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
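#
# For instance, range type (1) covers the ASCII letters: codepoints
# 0x0041..0x005A ('A'..'Z') fold one-to-one onto the consecutive range
# 0x0061..0x007A ('a'..'z'), so the whole block can be stored as a single
# range record. (Illustrative note, not part of the original script.)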
def is_range_compatible(folding, codepoint_list, index0, index):
N = index - index0
codepoint0 = codepoint_list[index0]
codepoint1 = codepoint_list[index0+1]
codepointN = codepoint_list[index]
mapping0 = folding[codepoint0]
mapping1 = folding[codepoint1]
mappingN = folding[codepointN]
# Check the range type (1):
if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \
and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]:
return True
# Check the range type (2):
if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \
and mapping0[0] - codepoint0 == 1 \
and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]:
return True
return False
def mapping_str(list, mapping):
return ",".join("0x{:04x}".format(x) for x in mapping)
for mapping_len in range(1, 4):
folding = folding_list[mapping_len-1]
codepoint_list = list(folding)
index0 = 0
count = len(folding)
records = list()
data_records = list()
while index0 < count:
index1 = index0 + 1
while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
index1 += 1
if index1 - index0 > 2:
# Range of codepoints
records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
index0 = index1
else:
# Single codepoint
records.append("S(0x{:04x})".format(codepoint_list[index0]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
index0 += 1
sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
|
tensorflow/contrib/training/python/training/hparam_test.py | DEVESHTARASIA/tensorflow | 384 | 12020 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def _assertDictEquals(self, d1, d2):
self.assertEqual(len(d1), len(d2))
for k, v in six.iteritems(d1):
self.assertTrue(k in d2, k)
self.assertEquals(v, d2[k], d2[k])
def testEmpty(self):
hparams = hparam.HParams()
self._assertDictEquals({}, hparams.values())
hparams.parse('')
self._assertDictEquals({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
self.assertEquals(expected_str, str(hparams.__str__()))
self.assertEquals(expected_str, str(hparams))
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('aaa=12')
self._assertDictEquals(
{'aaa': 12, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('c_c=relu4,b=-2.0e10')
self._assertDictEquals({'aaa': 12, 'b': -2.0e10, 'c_c': 'relu4'},
hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(-2.0e10, hparams.b)
self.assertEquals('relu4', hparams.c_c)
hparams.parse('c_c=,b=0,')
self._assertDictEquals({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(0.0, hparams.b)
self.assertEquals('', hparams.c_c)
hparams.parse('c_c=2.3",b=+2,')
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals(12, hparams2.aaa)
self.assertEquals(2.0, hparams2.b)
self.assertEquals('2.3"', hparams2.c_c)
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEquals(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEquals() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEquals(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self._assertDictEquals({'aaa': [1], 'b': [2.0, 3.0], 'c_c': ['relu6']},
hparams.values())
self.assertEquals([1], hparams.aaa)
self.assertEquals([2.0, 3.0], hparams.b)
self.assertEquals(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEquals([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEquals([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEquals(['relu4', 'relu12'], hparams.c_c)
self.assertEquals([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEquals([-34], hparams.aaa)
self.assertEquals([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEquals([3], hparams.aaa)
self.assertEquals(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals([3], hparams2.aaa)
self.assertEquals([1.0], hparams2.b)
self.assertEquals(['_12', '3\'4"'], hparams2.c_c)
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': True}, hparams.values())
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self._assertDictEquals(
{'aaa': 12, 'b': 3.0, 'c_c': 'relu4', 'd': False}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(3.0, hparams.b)
self.assertEquals('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEquals(12, hparams2.aaa)
self.assertEquals(3.0, hparams2.b)
self.assertEquals('relu4', hparams2.c_c)
self.assertEquals(False, hparams2.d)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
if __name__ == '__main__':
test.main()
|
onnxsim/__init__.py | Wheest/onnx-simplifier | 1,977 | 12029 | <gh_stars>1000+
from onnxsim.onnx_simplifier import simplify
__version__ = '0.0.0'
|
validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py | samcom12/anuga_core | 136 | 12036 | <filename>validation_tests/analytical_exact/river_at_rest_varying_topo_width/numerical_varying_width.py
"""Simple water flow example using ANUGA
Water driven up a linear slope and time varying boundary,
similar to a beach environment
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import sys
import anuga
from anuga import myid, finalize, distribute
from anuga import Domain as Domain
from math import cos
from numpy import zeros, ones, array, interp, polyval, ones_like, zeros_like
from numpy import where, logical_and
from time import localtime, strftime, gmtime
from scipy.interpolate import interp1d
from anuga.geometry.polygon import inside_polygon, is_inside_triangle
#from balanced_dev import *
#-------------------------------------------------------------------------------
# Copy scripts to time stamped output directory and capture screen
# output to file
#-------------------------------------------------------------------------------
time = strftime('%Y%m%d_%H%M%S',localtime())
#output_dir = 'varying_width'+time
output_dir = '.'
output_file = 'varying_width'
#anuga.copy_code_files(output_dir,__file__)
#start_screen_catcher(output_dir+'_')
args = anuga.get_args()
alg = args.alg
verbose = args.verbose
#------------------------------------------------------------------------------
# Setup domain
#------------------------------------------------------------------------------
dx = 1.
dy = dx
L = 1500.
W = 60.
#===============================================================================
# Create sequential domain
#===============================================================================
if myid == 0:
# structured mesh
points, vertices, boundary = anuga.rectangular_cross(int(L/dx), int(W/dy), L, W, (0.,-W/2.))
#domain = anuga.Domain(points, vertices, boundary)
domain = Domain(points, vertices, boundary)
domain.set_name(output_file)
domain.set_datadir(output_dir)
#------------------------------------------------------------------------------
# Setup Algorithm, either using command line arguments
# or override manually yourself
#------------------------------------------------------------------------------
domain.set_flow_algorithm(alg)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.0)
domain.set_quantity('stage', 12.0)
XX = array([0.,50.,100.,150.,250.,300.,350.,400.,425.,435.,450.,470.,475.,500.,
505.,530.,550.,565.,575.,600.,650.,700.,750.,800.,820.,900.,950.,
1000.,1500.])
ZZ = array([0.,0.,2.5,5.,5.,3.,5.,5.,7.5,8.,9.,9.,9.,9.1,9.,9.,6.,5.5,5.5,5.,
4.,3.,3.,2.3,2.,1.2,0.4,0.,0.])
WW = array([40.,40.,30.,30.,30.,30.,25.,25.,30.,35.,35.,40.,40.,40.,45.,45.,50.,
45.,40.,40.,30.,40.,40.,5.,40.,35.,25.,40.,40.])/2.
depth = interp1d(XX, ZZ)
width = interp1d(XX, WW)
def bed_elevation(x,y):
z = 25.0*ones_like(x)
wid = width(x)
dep = depth(x)
z = where( logical_and(y < wid, y>-wid), dep, z)
return z
domain.set_quantity('elevation', bed_elevation)
else:
domain = None
#===========================================================================
# Create Parallel domain
#===========================================================================
domain = distribute(domain)
#-----------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
from math import sin, pi, exp
Br = anuga.Reflective_boundary(domain) # Solid reflective wall
#Bt = anuga.Transmissive_boundary(domain) # Continue all values on boundary
#Bd = anuga.Dirichlet_boundary([1,0.,0.]) # Constant boundary values
# Associate boundary tags with boundary objects
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})
#------------------------------------------------------------------------------
# Produce a documentation of parameters
#------------------------------------------------------------------------------
if myid == 0:
parameter_file=open('parameters.tex', 'w')
parameter_file.write('\\begin{verbatim}\n')
from pprint import pprint
pprint(domain.get_algorithm_parameters(),parameter_file,indent=4)
parameter_file.write('\\end{verbatim}\n')
parameter_file.close()
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 0.1, finaltime = 5.0):
#print(domain.timestepping_statistics(track_speeds=True))
if myid == 0 and verbose: print(domain.timestepping_statistics())
#vis.update()
if myid == 0 and verbose: print('That took %s sec' % str(time.time()-t0))
domain.sww_merge(delete_old=True)
finalize()
|
pyxtal/miscellaneous/from_ase_molecule.py | ubikpt/PyXtal | 127 | 12047 | <reponame>ubikpt/PyXtal
from pyxtal.molecule import *
from ase.build import molecule
from pymatgen.core import Molecule
def get_ase_mol(molname):
"""convert ase molecule to pymatgen style"""
ase_mol = molecule(molname)
pos = ase_mol.get_positions()
symbols = ase_mol.get_chemical_symbols()
return Molecule(symbols, pos)
if __name__ == "__main__":
# ---------------------------------------------------
for name in ["H2", "H2O", "HCl", "CS2", "C2Cl4", "PH3", "CH4", "C6H6", "C60"]:
mol = get_ase_mol(name)
pga = PointGroupAnalyzer(mol)
# Symmetrize the molecule using pymatgen
mol = pga.symmetrize_molecule()["sym_mol"]
pga = PointGroupAnalyzer(mol)
print(name, " has point group symmetry: ", pga.get_pointgroup())
# Check if orders of rotation are detected correctly
pg = pga.get_pointgroup()
for op in pg:
opa = OperationAnalyzer(op)
if opa.order == "irrational":
print(opa)
elif opa.order > 10:
print(opa)
# orientation_in_wyckoff_position(mol, sg, WP's index in sg)
# returns a list of orientations consistent with the WP's symmetry.
# We can choose any of these orientations at random using np.random.choice
# To use an orientation, do mol.apply_operation(orientation)
# Spacegroup 16, index 6 has .2. symmetry
# check 2 fold rotation
allowed = orientation_in_wyckoff_position(mol, 16, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check reflection
allowed = orientation_in_wyckoff_position(mol, 25, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 3 fold rotation
allowed = orientation_in_wyckoff_position(mol, 147, 4, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 3",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check -1
allowed = orientation_in_wyckoff_position(mol, 2, 2, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm -1",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 2/m
allowed = orientation_in_wyckoff_position(mol, 64, 6, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 2/m",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
# check 6
allowed = orientation_in_wyckoff_position(mol, 168, 3, randomize=True)
if allowed is not False:
print(
"Found " + str(len(allowed)) + " orientations for ",
name,
" with site symm 6",
)
for i, op in enumerate(allowed):
mo = deepcopy(mol)
mo.apply_operation(op)
filename = "xyz/" + name + "-" + str(i) + ".xyz"
mo.to(fmt="xyz", filename=filename)
|
Python3/src/basicExample.py | emanuelen5/XPlaneConnect | 457 | 12050 | <filename>Python3/src/basicExample.py
from time import sleep
import xpc
def ex():
print("X-Plane Connect example script")
print("Setting up simulation")
with xpc.XPlaneConnect() as client:
# Verify connection
try:
# If X-Plane does not respond to the request, a timeout error
# will be raised.
client.getDREF("sim/test/test_float")
except:
print("Error establishing connection to X-Plane.")
print("Exiting...")
return
# Set position of the player aircraft
print("Setting position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.524, -122.06899, 2500, 0, 0, 0, 1]
client.sendPOSI(posi)
# Set position of a non-player aircraft
print("Setting NPC position")
# Lat Lon Alt Pitch Roll Yaw Gear
posi = [37.52465, -122.06899, 2500, 0, 20, 0, 1]
client.sendPOSI(posi, 1)
# Set angle of attack, velocity, and orientation using the DATA command
print("Setting orientation")
data = [\
[18, 0, -998, 0, -998, -998, -998, -998, -998],\
[ 3, 130, 130, 130, 130, -998, -998, -998, -998],\
[16, 0, 0, 0, -998, -998, -998, -998, -998]\
]
client.sendDATA(data)
# Set control surfaces and throttle of the player aircraft using sendCTRL
print("Setting controls")
ctrl = [0.0, 0.0, 0.0, 0.8]
client.sendCTRL(ctrl)
# Pause the sim
print("Pausing")
client.pauseSim(True)
sleep(2)
# Toggle pause state to resume
print("Resuming")
client.pauseSim(False)
# Stow landing gear using a dataref
print("Stowing gear")
gear_dref = "sim/cockpit/switches/gear_handle_status"
client.sendDREF(gear_dref, 0)
# Let the sim run for a bit.
sleep(4)
# Make sure gear was stowed successfully
gear_status = client.getDREF(gear_dref)
if gear_status[0] == 0:
print("Gear stowed")
else:
print("Error stowing gear")
print("End of Python client example")
input("Press any key to exit...")
if __name__ == "__main__":
ex() |
eth_tester/normalization/common.py | PabloLefort/eth-tester | 215 | 12080 | from cytoolz.functoolz import (
curry,
)
from eth_utils import (
to_dict,
to_tuple,
)
@curry
@to_dict
def normalize_dict(value, normalizers):
for key, item in value.items():
normalizer = normalizers[key]
yield key, normalizer(item)
@curry
@to_tuple
def normalize_array(value, normalizer):
"""
    This is just `map` but it's nice to have it return a consistent type
(tuple).
"""
for item in value:
yield normalizer(item)
@curry
def normalize_if(value, conditional_fn, normalizer):
if conditional_fn(value):
return normalizer(value)
else:
return value
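# --- Illustrative usage sketch (not part of the original module) ---
# Because the helpers are curried, they can be partially applied to build
# reusable normalizers; the field names below are made up for the example.
#
# normalize_block = normalize_dict(normalizers={
#     'number': int,
#     'miner': lambda value: value.lower(),
#     'transactions': normalize_array(normalizer=bytes.hex),
# })
# normalize_block({'number': '7', 'miner': '0xAB', 'transactions': [b'\x01']})
# # -> {'number': 7, 'miner': '0xab', 'transactions': ('01',)}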
|
src/cactus/shared/commonTest.py | thiagogenez/cactus | 209 | 12090 | import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
class TestCase(unittest.TestCase):
def setUp(self):
self.testNo = TestStatus.getTestSetup(1, 5, 10, 100)
self.tempDir = getTempDirectory(os.getcwd())
self.tempFiles = []
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
system("rm -rf %s" % self.tempDir)
@TestStatus.shortLength
def testCactusCall(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open("/dev/urandom", "rb") as randText:
with open(inputFile, 'w') as fh:
fh.write(b64encode(randText.read(1024)).decode())
with open(inputFile) as fh:
input = "".join(fh.read().split("\n"))
#Send input to container's stdin through a file, get output
#from stdout
output = "".join(cactus_call(infile=inputFile, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
#Send input as string, get output from stdout
output = "".join(cactus_call(stdin_string=input, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
@TestStatus.shortLength
def testCactusCallPipes(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open(inputFile, 'w') as f:
f.write('foobar\n')
# using 'cat' here rather than infile is intentional; it tests
# whether the directory is mounted into containers correctly.
output = cactus_call(parameters=[['cat', inputFile],
['sed', 's/foo/baz/g'],
['awk', '{ print "quux" $0 }']],
check_output=True)
self.assertEqual(output, 'quuxbazbar\n')
@TestStatus.mediumLength
def testChildTreeJob(self):
"""Check that the ChildTreeJob class runs all children."""
numChildren = 100
flagDir = getTempDirectory()
options = Job.Runner.getDefaultOptions(getTempDirectory())
shutil.rmtree(options.jobStore)
with Toil(options) as toil:
toil.start(CTTestParent(flagDir, numChildren))
# Check that all jobs ran
for i in range(numChildren):
self.assertTrue(os.path.exists(os.path.join(flagDir, str(i))))
shutil.rmtree(flagDir)
class CTTestParent(ChildTreeJob):
def __init__(self, flagDir, numChildren):
self.flagDir = flagDir
self.numChildren = numChildren
super(CTTestParent, self).__init__()
def run(self, fileStore):
for i in range(self.numChildren):
self.addChild(CTTestChild(self.flagDir, i))
class CTTestChild(Job):
def __init__(self, flagDir, index):
self.flagDir = flagDir
self.index = index
super(CTTestChild, self).__init__()
def run(self, fileStore):
# Mark that this job has run using a flag file
path = os.path.join(self.flagDir, str(self.index))
with open(path, 'w') as f:
# Empty file
f.write('')
if __name__ == '__main__':
unittest.main()
|
aws_marketplace/creating_marketplace_products/src/training_specification.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 12096 | import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
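# --- Illustrative usage sketch (not part of the original notebook sources) ---
# The channel/metric/objective arguments only need objects whose __dict__
# serializes into the expected JSON shape, so SimpleNamespace stands in here
# for the helper classes defined elsewhere; the ECR image URI is a placeholder.
#
# from types import SimpleNamespace
#
# channel = SimpleNamespace(
#     Name="training",
#     Description="Training channel",
#     SupportedContentTypes=["text/csv"],
#     SupportedInputModes=["File"],
#     IsRequired=True)
# spec_json = TrainingSpecification().get_training_specification_json(
#     ecr_image="123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
#     supports_gpu=True,
#     supported_channels=[channel])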
|
nvd3/multiChart.py | areski/python-nvd3 | 442 | 12104 | <filename>nvd3/multiChart.py<gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
class multiChart(TemplateMixin, NVD3Chart):
"""
A multiChart is a type of chart which combines several plots of the same or different types.
Python example::
from nvd3 import multiChart
type = "multiChart"
chart = multiChart(name=type, x_is_date=False, x_axis_format="AM_PM")
xdata = [1,2,3,4,5,6]
ydata = [115.5,160.5,108,145.5,84,70.5]
ydata2 = [48624,42944,43439,24194,38440,31651]
kwargs1 = {'color': 'black'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}}
chart.add_serie(y=ydata, x=xdata, type='line', yaxis=1, name='visits', extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(y=ydata2, x=xdata, type='bar', yaxis=2,name='spend', extra=extra_serie, **kwargs2)
chart.buildhtml()
Javascript rendered to:
.. raw:: html
<div id="multichart"><svg style="height:450px;"></svg></div>
<script>
data_multichart=[{"color": "black", "type": "line", "values": [{"y": 115.5, "x": 1}, {"y": 160.5, "x": 2}, {"y": 108, "x": 3}, {"y
": 145.5, "x": 4}, {"y": 84, "x": 5}, {"y": 70.5, "x": 6}], "key": "visits", "yAxis": 1}, {"color": "red", "type": "bar", "values": [{"y": 486
24, "x": 1}, {"y": 42944, "x": 2}, {"y": 43439, "x": 3}, {"y": 24194, "x": 4}, {"y": 38440, "x": 5}, {"y": 31651, "x": 6}], "key": "spend", "y
Axis": 2}];
nv.addGraph(function() {
var chart = nv.models.multiChart();
chart.margin({top: 30, right: 60, bottom: 20, left: 60});
var datum = data_multichart;
chart.yAxis1
.tickFormat(d3.format(',.02f'));
chart.yAxis2
.tickFormat(d3.format(',.02f'));
chart.xAxis
.tickFormat(function(d) { return get_am_pm(parseInt(d)); });
function get_am_pm(d){
if (d > 12) {
d = d - 12; return (String(d) + 'PM');
}
else {
return (String(d) + 'AM');
}
};
chart.showLegend(true);
d3.select('#multichart svg')
.datum(datum)
.transition().duration(500)
.attr('height', 450)
.call(chart);
});
</script>
See the source code of this page, to see the underlying javascript.
"""
CHART_FILENAME = "./multichart.html"
template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME)
def __init__(self, **kwargs):
super(multiChart, self).__init__(**kwargs)
self.model = 'multiChart'
height = kwargs.get('height', 450)
width = kwargs.get('width', None)
if kwargs.get('x_is_date', False):
self.set_date_flag(True)
self.create_x_axis('xAxis',
format=kwargs.get('x_axis_format', '%d %b %Y'),
date=True)
self.set_custom_tooltip_flag(True)
else:
if kwargs.get('x_axis_format') == 'AM_PM':
self.x_axis_format = format = 'AM_PM'
else:
format = kwargs.get('x_axis_format', 'r')
self.create_x_axis('xAxis', format=format,
custom_format=kwargs.get('x_custom_format',
False))
self.create_y_axis(
'yAxis1',
format=kwargs.get('y1_axis_format', '.02f'),
custom_format=kwargs.get('y1_custom_format', False))
self.create_y_axis(
'yAxis2',
format=kwargs.get('y2_axis_format', '.02f'),
custom_format=kwargs.get('y2_custom_format', False))
        # must have a specified height, otherwise it superimposes both charts
self.set_graph_height(height)
if width:
self.set_graph_width(width)
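

# Editor's note: the guarded block below is an illustrative usage sketch and is not
# part of the upstream python-nvd3 file. It exercises the y1_axis_format /
# y2_axis_format keyword arguments handled in __init__ above, using only the public
# API already shown in the class docstring; the data values are made up.
if __name__ == "__main__":
    demo = multiChart(name="axis_format_demo", x_is_date=False,
                      y1_axis_format=".0f", y2_axis_format=",.2f")
    demo.add_serie(y=[1, 2, 3], x=[1, 2, 3], type="line", yaxis=1, name="left")
    demo.add_serie(y=[100, 200, 300], x=[1, 2, 3], type="bar", yaxis=2, name="right")
    demo.buildhtml()
    # htmlcontent is assumed here to hold the rendered page after buildhtml().
    print(demo.htmlcontent)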
|
chromium/tools/telemetry/telemetry/internal/image_processing/video.py | wedataintelligence/vivaldi-source | 925 | 12138 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from catapult_base import cloud_storage
from telemetry.core import platform
from telemetry.util import image_util
from telemetry.util import rgba_color
HIGHLIGHT_ORANGE_FRAME = rgba_color.WEB_PAGE_TEST_ORANGE
class BoundingBoxNotFoundException(Exception):
pass
class Video(object):
"""Utilities for storing and interacting with the video capture."""
def __init__(self, video_file_obj):
assert video_file_obj.delete
assert not video_file_obj.close_called
self._video_file_obj = video_file_obj
self._tab_contents_bounding_box = None
def UploadToCloudStorage(self, bucket, target_path):
"""Uploads video file to cloud storage.
Args:
target_path: Path indicating where to store the file in cloud storage.
"""
cloud_storage.Insert(bucket, target_path, self._video_file_obj.name)
def GetVideoFrameIter(self):
"""Returns the iteration for processing the video capture.
This looks for the initial color flash in the first frame to establish the
tab content boundaries and then omits all frames displaying the flash.
Yields:
      (time_ms, image) tuples representing each video keyframe. Only the first
      frame in a run of sequential duplicate bitmaps is typically included.
time_ms is milliseconds since navigationStart.
image may be a telemetry.core.Bitmap, or a numpy array depending on
whether numpy is installed.
"""
frame_generator = self._FramesFromMp4(self._video_file_obj.name)
# Flip through frames until we find the initial tab contents flash.
content_box = None
for _, bmp in frame_generator:
content_box = self._FindHighlightBoundingBox(
bmp, HIGHLIGHT_ORANGE_FRAME)
if content_box:
break
if not content_box:
raise BoundingBoxNotFoundException(
'Failed to identify tab contents in video capture.')
# Flip through frames until the flash goes away and emit that as frame 0.
timestamp = 0
for timestamp, bmp in frame_generator:
if not self._FindHighlightBoundingBox(bmp, HIGHLIGHT_ORANGE_FRAME):
yield 0, image_util.Crop(bmp, *content_box)
break
start_time = timestamp
for timestamp, bmp in frame_generator:
yield timestamp - start_time, image_util.Crop(bmp, *content_box)
def _FindHighlightBoundingBox(self, bmp, color, bounds_tolerance=8,
color_tolerance=8):
"""Returns the bounding box of the content highlight of the given color.
Raises:
      BoundingBoxNotFoundException if the highlight could not be found.
"""
content_box, pixel_count = image_util.GetBoundingBox(bmp, color,
tolerance=color_tolerance)
if not content_box:
return None
# We assume arbitrarily that tabs are all larger than 200x200. If this
# fails it either means that assumption has changed or something is
# awry with our bounding box calculation.
if content_box[2] < 200 or content_box[3] < 200:
raise BoundingBoxNotFoundException('Unexpectedly small tab contents.')
# TODO(tonyg): Can this threshold be increased?
if pixel_count < 0.9 * content_box[2] * content_box[3]:
raise BoundingBoxNotFoundException(
'Low count of pixels in tab contents matching expected color.')
# Since we allow some fuzziness in bounding box finding, we want to make
# sure that the bounds are always stable across a run. So we cache the
# first box, whatever it may be.
#
# This relies on the assumption that since Telemetry doesn't know how to
# resize the window, we should always get the same content box for a tab.
# If this assumption changes, this caching needs to be reworked.
if not self._tab_contents_bounding_box:
self._tab_contents_bounding_box = content_box
# Verify that there is only minor variation in the bounding box. If it's
# just a few pixels, we can assume it's due to compression artifacts.
for x, y in zip(self._tab_contents_bounding_box, content_box):
if abs(x - y) > bounds_tolerance:
        # If this fails, it means that either the above assumption has
# changed or something is awry with our bounding box calculation.
raise BoundingBoxNotFoundException(
'Unexpected change in tab contents box.')
return self._tab_contents_bounding_box
def _FramesFromMp4(self, mp4_file):
host_platform = platform.GetHostPlatform()
if not host_platform.CanLaunchApplication('avconv'):
host_platform.InstallApplication('avconv')
def GetDimensions(video):
proc = subprocess.Popen(['avconv', '-i', video], stderr=subprocess.PIPE)
dimensions = None
output = ''
for line in proc.stderr.readlines():
output += line
if 'Video:' in line:
dimensions = line.split(',')[2]
dimensions = map(int, dimensions.split()[0].split('x'))
break
proc.communicate()
assert dimensions, ('Failed to determine video dimensions. output=%s' %
output)
return dimensions
def GetFrameTimestampMs(stderr):
"""Returns the frame timestamp in integer milliseconds from the dump log.
The expected line format is:
' dts=1.715 pts=1.715\n'
We have to be careful to only read a single timestamp per call to avoid
deadlock because avconv interleaves its writes to stdout and stderr.
"""
while True:
line = ''
next_char = ''
while next_char != '\n':
next_char = stderr.read(1)
line += next_char
if 'pts=' in line:
return int(1000 * float(line.split('=')[-1]))
dimensions = GetDimensions(mp4_file)
frame_length = dimensions[0] * dimensions[1] * 3
frame_data = bytearray(frame_length)
# Use rawvideo so that we don't need any external library to parse frames.
proc = subprocess.Popen(['avconv', '-i', mp4_file, '-vcodec',
'rawvideo', '-pix_fmt', 'rgb24', '-dump',
'-loglevel', 'debug', '-f', 'rawvideo', '-'],
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
num_read = proc.stdout.readinto(frame_data)
if not num_read:
        return  # end of frames; PEP 479 forbids raising StopIteration inside a generator
assert num_read == len(frame_data), 'Unexpected frame size: %d' % num_read
yield (GetFrameTimestampMs(proc.stderr),
image_util.FromRGBPixels(dimensions[0], dimensions[1], frame_data))
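

# Editor's note: the guarded block below is an illustrative sketch added for this
# document, not part of the original Telemetry module. It shows the intended
# consumption pattern of GetVideoFrameIter(): each yielded pair is (milliseconds
# since the highlight flash cleared, cropped tab-contents frame). The capture
# object is a placeholder assumption; a real run passes the temporary file that
# Telemetry's screen recorder wrote the .mp4 into.
if __name__ == '__main__':
  import tempfile

  capture_file = tempfile.NamedTemporaryFile(suffix='.mp4')  # placeholder capture
  video = Video(capture_file)
  for time_ms, _frame in video.GetVideoFrameIter():
    print('keyframe at %d ms since navigationStart' % time_ms)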
|
adapters/heiman/HS1RC.py | russdan/domoticz-zigbee2mqtt-plugin | 146 | 12177 |
from adapters.adapter_with_battery import AdapterWithBattery
from devices.switch.selector_switch import SelectorSwitch
class HeimanAlarmRemoteAdapter(AdapterWithBattery):
def __init__(self):
super().__init__()
self.switch = SelectorSwitch('Remote', 'action')
self.switch.add_level('Off', None)
self.switch.add_level('Arm all zones', 'arm_all_zones')
self.switch.add_level('Arm partial zones', 'arm_partial_zones')
self.switch.add_level('Disarm', 'disarm')
self.switch.add_level('Emergency', 'emergency')
self.switch.set_selector_style(SelectorSwitch.SELECTOR_TYPE_MENU)
self.switch.disable_value_check_on_update()
self.devices.append(self.switch)
def convert_message(self, message):
message = super().convert_message(message)
return message
def handleCommand(self, alias, device, device_data, command, level, color):
self.switch.handle_command(device_data, command, level, color)
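
# Editor's note (illustrative, not part of the upstream adapter): zigbee2mqtt
# reports this remote's button presses under the 'action' key wired to the
# SelectorSwitch above, so a hypothetical payload such as
# {"action": "disarm", "battery": 100} maps to the 'Disarm' level, with the
# battery field handled by the AdapterWithBattery base class.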
|
kratos/mpi/tests/test_data_communicator_factory.py | lkusch/Kratos | 778 | 12200 | from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
if IsDistributedRun():
from KratosMultiphysics.mpi import DataCommunicatorFactory
import KratosMultiphysics.KratosUnittest as UnitTest
import math
class TestDataCommunicatorFactory(UnitTest.TestCase):
def setUp(self):
self.registered_comms = []
self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator()
self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName()
def tearDown(self):
if len(self.registered_comms) > 0:
ParallelEnvironment.SetDefaultDataCommunicator(self.original_default)
for comm_name in self.registered_comms:
ParallelEnvironment.UnregisterDataCommunicator(comm_name)
def markForCleanUp(self,comm_name):
self.registered_comms.append(comm_name)
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorDuplication(self):
duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator, "Duplicate")
self.markForCleanUp("Duplicate") # to clean up during tearDown
self.assertEqual(duplicate_comm.Rank(), self.default_data_communicator.Rank())
self.assertEqual(duplicate_comm.Size(), self.default_data_communicator.Size())
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorSplit(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
split_comm = DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator, rank % 2, 0, "EvenOdd")
self.markForCleanUp("EvenOdd") # to clean up during tearDown
expected_rank = rank // 2
if rank % 2 == 0:
expected_size = math.ceil(size/2)
else:
expected_size = math.floor(size/2)
self.assertEqual(split_comm.Rank(), expected_rank)
self.assertEqual(split_comm.Size(), expected_size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 1, "Test requires at least two ranks.")
def testDataCommunicatorCreateFromRange(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
ranks = [i for i in range(1,size)]
range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, ranks, "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
if rank == 0:
self.assertTrue(range_comm.IsNullOnThisRank())
self.assertFalse(range_comm.IsDefinedOnThisRank())
else:
self.assertEqual(range_comm.Rank(), rank-1)
self.assertEqual(range_comm.Size(), size-1)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateUnion(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
# Create union communicator (should contain all ranks)
union_comm = DataCommunicatorFactory.CreateUnionAndRegister(all_except_first, all_except_last, self.default_data_communicator, "Union")
self.markForCleanUp("Union") # to clean up during tearDown
self.assertFalse(union_comm.IsNullOnThisRank())
self.assertEqual(union_comm.Rank(), rank)
self.assertEqual(union_comm.Size(), size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateIntersection(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister(
all_except_first, all_except_last, self.default_data_communicator, "Intersection")
self.markForCleanUp("Intersection") # to clean up during tearDown
if rank == 0 or rank == size - 1:
# The first and last ranks do not participate in the intersection communicator
self.assertTrue(intersection_comm.IsNullOnThisRank())
else:
self.assertEqual(intersection_comm.Rank(), rank - 1 )
self.assertEqual(intersection_comm.Size(), size - 2 )
if __name__ == "__main__":
UnitTest.main()
|
tools/testrunner/outproc/message.py | LancerWang001/v8 | 20,995 | 12203 | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from . import base
class OutProc(base.ExpectedOutProc):
def __init__(self, expected_outcomes, basepath, expected_fail,
expected_filename, regenerate_expected_files):
super(OutProc, self).__init__(expected_outcomes, expected_filename,
regenerate_expected_files)
self._basepath = basepath
self._expected_fail = expected_fail
def _is_failure_output(self, output):
fail = output.exit_code != 0
if fail != self._expected_fail:
return True
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
with open(self._basepath + '.out') as f:
for line in f:
if line.startswith("#") or not line.strip():
continue
expected_lines.append(line)
raw_lines = output.stdout.splitlines()
actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
if len(expected_lines) != len(actual_lines):
return True
# Try .js first, and fall back to .mjs.
# TODO(v8:9406): clean this up by never separating the path from
# the extension in the first place.
base_path = self._basepath + '.js'
if not os.path.exists(base_path):
base_path = self._basepath + '.mjs'
env = {
'basename': os.path.basename(base_path),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
      pattern = pattern.replace('\\{NUMBER\\}', r'\d+(?:\.\d*)?')
pattern = '^%s$' % pattern
if not re.match(pattern, actual):
return True
return False
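
  # Editor's note (illustrative, not from the original file): each expected line is
  # %-formatted with {'basename': ...}, then '*' acts as a wildcard and '{NUMBER}'
  # matches an integer or decimal. For example, a hypothetical expected line
  #   "%(basename)s:*: error at {NUMBER}"
  # would match an actual output line such as "foo.js:12: error at 3.5".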
def _ignore_line(self, string):
"""Ignore empty lines, valgrind output, Android output."""
return (
not string or
not string.strip() or
string.startswith("==") or
string.startswith("**") or
string.startswith("ANDROID") or
# Android linker warning.
string.startswith('WARNING: linker:')
)
|
Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py | diCagri/content | 799 | 12205 | import pytest
from Thycotic import Client, \
secret_password_get_command, secret_username_get_command, \
secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
secret_delete_command, folder_create_command, folder_delete_command, folder_update_command
from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
FOLDER_UPDATE_RAW_RESPONSE
GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "<PASSWORD>"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
@pytest.mark.parametrize('command, args, http_response, context', [
(secret_password_get_command, GET_PASSWORD_BY_ID_ARGS, GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT),
(secret_username_get_command, GET_USERNAME_BY_ID_ARGS, GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT),
(secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE, SECRET_GET_CONTENT),
(secret_password_update_command, SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,
SECRET_PASSWORD_UPDATE_CONTEXT),
(secret_checkout_command, SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE, SECRET_CHECKOUT_CONTEXT),
(secret_checkin_command, SECRET_CHECKIN_ARGS, SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT),
(secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE, SECRET_DELETE_CONTEXT),
(folder_create_command, FOLDER_CREATE_ARGS, FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT),
(folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE, FOLDER_DELETE_CONTEXT),
(folder_update_command, FOLDER_UPDATE_ARGS, FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)
])
def test_thycotic_commands(command, args, http_response, context, mocker):
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://thss.softwarium.net/SecretServer", username="xsoar1", password="<PASSWORD>",
proxy=False, verify=False)
mocker.patch.object(Client, '_http_request', return_value=http_response)
outputs = command(client, **args)
results = outputs.to_context()
assert results.get("EntryContext") == context
|
scripts/tests/snapshots/snap_keywords_test.py | Duroktar/Wolf | 105 | 12212 | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
|
docs/source/conf.py | andriis/bravado | 600 | 12228 | # -*- coding: utf-8 -*-
import sphinx_rtd_theme
# -- General configuration -----------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bravado'
copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc'
exclude_patterns = []
pygments_style = 'sphinx'
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'bravado-pydoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None),
}
|
eeauditor/auditors/aws/Amazon_ECS_Auditor.py | kbhagi/ElectricEye | 442 | 12248 | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
ecs = boto3.client("ecs")
# loop through ECS Clusters
def list_clusters(cache):
response = cache.get("list_clusters")
if response:
return response
cache["list_clusters"] = ecs.list_clusters()
return cache["list_clusters"]
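
# Editor's note (illustrative, not part of the upstream auditor): the ``cache``
# argument is a plain dict shared across checks so the cluster list is fetched
# only once per run. A minimal, hedged driver sketch (account/region values are
# placeholders):
#
#   shared_cache = {}
#   for finding in ecs_cluster_container_insights_check(
#           shared_cache, awsAccountId="111111111111",
#           awsRegion="us-east-1", awsPartition="aws"):
#       print(finding["Title"], finding["Compliance"]["Status"])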
@registry.register_check("ecs")
def ecs_cluster_container_insights_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.1] ECS clusters should have container insights enabled"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
for settings in clusterinfo["settings"]:
contInsightsCheck = str(settings["value"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow()
.replace(tzinfo=datetime.timezone.utc)
.isoformat()
)
if contInsightsCheck == "disabled":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " does not have container insights enabled. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " has container insights enabled.",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_cluster_default_provider_strategy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
defaultProviderStratCheck = str(clusterinfo["defaultCapacityProviderStrategy"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if defaultProviderStratCheck == "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " does not have a default provider strategy configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " has a default provider strategy configured.",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_privileged_container_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.3] ECS Task Definitions should not run privileged containers if not required"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response['taskDefinitionArn'])
tdefFamily = str(response["family"])
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
# We are going to assume that if there is not a privileged flag...that it is ;)
try:
privCheck = str(cdef["privileged"])
                except KeyError:
privCheck = 'UNKNOWN'
if privCheck != 'False':
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has defined a Privileged container, which should be avoided unless absolutely necessary. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has not defined a Privileged container.",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_security_labels_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response["taskDefinitionArn"])
tdefFamily = str(response["family"])
# If there is a network mode of "awsvpc" it is likely a Fargate task - even though EC2 compute can run with that...
# time for some funky edge cases, keep that in mind before you yeet an issue at me, please ;)
if str(response["networkMode"]) == 'awsvpc':
continue
else:
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
try:
# This is a passing check
secOpts = str(cdef["dockerSecurityOptions"])
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has Docker Security Options configured.",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
                    except KeyError:
secOpts = str('["NO_OPTIONS"]')
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " does not have any Docker Security Options configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
except Exception as e:
print(e) |
trimap_module.py | lnugraha/trimap_generator | 168 | 12265 |
#!/usr/bin/env python
import cv2, os, sys
import numpy as np
def extractImage(path):
# error handller if the intended path is not found
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
return image
def checkImage(image):
"""
Args:
image: input image to be checked
Returns:
binary image
Raises:
RGB image, grayscale image, all-black, and all-white image
"""
if len(image.shape) > 2:
print("ERROR: non-binary image (RGB)"); sys.exit();
smallest = image.min(axis=0).min(axis=0) # lowest pixel value: 0 (black)
    largest = image.max(axis=0).max(axis=0) # highest pixel value: 255 (white)
if (smallest == 0 and largest == 0):
print("ERROR: non-binary image (all black)"); sys.exit()
elif (smallest == 255 and largest == 255):
print("ERROR: non-binary image (all white)"); sys.exit()
elif (smallest > 0 or largest < 255 ):
print("ERROR: non-binary image (grayscale)"); sys.exit()
else:
return True
class Toolbox:
def __init__(self, image):
self.image = image
@property
def printImage(self):
"""
Print image into a file for checking purpose
unitTest = Toolbox(image);
unitTest.printImage(image);
"""
f = open("image_results.dat", "w+")
for i in range(0, self.image.shape[0]):
for j in range(0, self.image.shape[1]):
f.write("%d " %self.image[i,j])
f.write("\n")
f.close()
@property
def displayImage(self):
"""
Display the image on a window
Press any key to exit
"""
cv2.imshow('Displayed Image', self.image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def saveImage(self, title, extension):
"""
Save as a specific image format (bmp, png, or jpeg)
"""
cv2.imwrite("{}.{}".format(title,extension), self.image)
def morph_open(self, image, kernel):
"""
Remove all white noises or speckles outside images
Need to tune the kernel size
Instruction:
unit01 = Toolbox(image);
kernel = np.ones( (9,9), np.uint8 );
morph = unit01.morph_open(input_image, kernel);
"""
bin_open = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel)
return bin_open
def morph_close(self, image, kernel):
"""
Remove all black noises or speckles inside images
Need to tune the kernel size
Instruction:
unit01 = Toolbox(image);
        kernel = np.ones( (11,11), np.uint8 );
morph = unit01.morph_close(input_image, kernel);
"""
bin_close = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel)
return bin_close
def trimap(image, name, size, number, erosion=False):
"""
    This function creates a trimap based on a simple dilation algorithm
    Inputs [4]: a binary image (black & white only), the image name, the dilation size in pixels, and the image number
                the last argument (erosion) is optional; i.e., how many erosion iterations are applied before dilation
    Output    : a trimap
"""
checkImage(image)
row = image.shape[0]
col = image.shape[1]
pixels = 2*size + 1 ## Double and plus 1 to have an odd-sized kernel
    kernel = np.ones((pixels,pixels),np.uint8)  ## Dilation kernel controlling how many pixels the foreground is extended by
if erosion is not False:
erosion = int(erosion)
erosion_kernel = np.ones((3,3), np.uint8) ## Design an odd-sized erosion kernel
image = cv2.erode(image, erosion_kernel, iterations=erosion) ## How many erosion do you expect
        image = np.where(image > 0, 255, image) ## Any gray-colored pixel becomes white (smoothing)
# Error-handler to prevent entire foreground annihilation
if cv2.countNonZero(image) == 0:
print("ERROR: foreground has been entirely eroded")
sys.exit()
dilation = cv2.dilate(image, kernel, iterations = 1)
dilation = np.where(dilation == 255, 127, dilation) ## WHITE to GRAY
remake = np.where(dilation != 127, 0, dilation) ## Smoothing
remake = np.where(image > 127, 200, dilation) ## mark the tumor inside GRAY
    remake = np.where(remake < 127, 0, remake) ## Embellishment
    remake = np.where(remake > 200, 0, remake) ## Embellishment
remake = np.where(remake == 200, 255, remake) ## GRAY to WHITE
#############################################
# Ensures only three pixel values available #
# TODO: Optimization with Cython #
#############################################
for i in range(0,row):
for j in range (0,col):
if (remake[i,j] != 0 and remake[i,j] != 255):
remake[i,j] = 127
path = "./images/results/" ## Change the directory
new_name = '{}px_'.format(size) + name + '_{}.png'.format(number)
cv2.imwrite(os.path.join(path, new_name) , remake)
#############################################
### TESTING SECTION ###
#############################################
if __name__ == '__main__':
path = "./images/test_images/test_image_11.png"
image = extractImage(path)
size = 10
number = path[-5]
title = "test_image"
unit01 = Toolbox(image);
kernel1 = np.ones( (11,11), np.uint8 )
unit01.displayImage
opening = unit01.morph_close(image,kernel1)
trimap(opening, title, size, number, erosion=False)
unit02 = Toolbox(opening)
unit02.displayImage
########################################################
## Default instruction (no binary opening or closing ##
## trimap(image, title, size, number, erosion=False); ##
########################################################
|
test/test_slimta_queue_proxy.py | nanojob/python-slimta | 141 | 12329 |
import unittest
from mox3.mox import MoxTestBase, IsA
from slimta.queue.proxy import ProxyQueue
from slimta.smtp.reply import Reply
from slimta.relay import Relay, TransientRelayError, PermanentRelayError
from slimta.envelope import Envelope
class TestProxyQueue(MoxTestBase, unittest.TestCase):
def setUp(self):
super(TestProxyQueue, self).setUp()
self.relay = self.mox.CreateMock(Relay)
self.env = Envelope('<EMAIL>', ['<EMAIL>'])
def test_enqueue(self):
self.relay._attempt(self.env, 0)
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
ret = q.enqueue(self.env)
self.assertEqual(1, len(ret))
self.assertEqual(2, len(ret[0]))
self.assertEqual(self.env, ret[0][0])
self.assertRegexpMatches(ret[0][1], r'[0-9a-fA-F]{32}')
def test_enqueue_relayerror(self):
err = PermanentRelayError('msg failure', Reply('550', 'Not Ok'))
self.relay._attempt(self.env, 0).AndRaise(err)
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
ret = q.enqueue(self.env)
self.assertEqual(1, len(ret))
self.assertEqual(2, len(ret[0]))
self.assertEqual(self.env, ret[0][0])
self.assertEqual(err, ret[0][1])
def test_start_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.start()
def test_kill_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.kill()
def test_flush_noop(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
q.flush()
def test_add_policy_error(self):
self.mox.ReplayAll()
q = ProxyQueue(self.relay)
with self.assertRaises(NotImplementedError):
q.add_policy('test')
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
grafana_backup/create_snapshot.py | Keimille/grafana-backup-tool | 515 | 12355 | import json
from grafana_backup.dashboardApi import create_snapshot
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
snapshot = json.loads(data)
try:
snapshot['name'] = snapshot['dashboard']['title']
except KeyError:
snapshot['name'] = "Untitled Snapshot"
(status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if status == 200:
print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
else:
print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))
|
segmentation/data/transforms/__init__.py | RajasekharChowdary9/panoptic-deeplab | 506 | 12358 |
from .build import build_transforms
from .pre_augmentation_transforms import Resize
from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
|
htk-lite/commandlist/help.py | otherbeast/hackers-tool-kit | 393 | 12371 |
#!/usr/local/bin/python
# coding: latin-1
#if you use this code give me credit @tuf_unkn0wn
#i do not give you permission to show / edit this script without my credit
#to ask questions or report a problem message me on instagram @tuf_unkn0wn
"""
██░ ██ ▄▄▄ ▄████▄ ██ ▄█▀▓█████ ▓█████▄
▓██░ ██▒▒████▄ ▒██▀ ▀█ ██▄█▒ ▓█ ▀ ▒██▀ ██▌
▒██▀▀██░▒██ ▀█▄ ▒▓█ ▄ ▓███▄░ ▒███ ░██ █▌
░▓█ ░██ ░██▄▄▄▄██ ▒▓▓▄ ▄██▒▓██ █▄ ▒▓█ ▄ ░▓█▄ ▌
░▓█▒░██▓ ▓█ ▓██▒▒ ▓███▀ ░▒██▒ █▄░▒████▒░▒████▓
▒ ▒░▒ ▒▒ ▓▒█ ░▒ ▒ ░▒ ▒▒ ▓▒ ▒░ ░ ▒▒▓ ▒
▒ ░▒░ ░ ▒ ▒▒ ░ ░ ▒ ░ ░▒ ▒░ ░ ░ ░ ░ ▒ ▒
░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
"""
import os
import sys
import random
lred = '\033[91m'
lblue = '\033[94m'
lgreen = '\033[92m'
yellow = '\033[93m'
cyan = '\033[1;36m'
purple = '\033[95m'
red = '\033[31m'
green = '\033[32m'
blue = '\033[34m'
orange = '\033[33m'
colorlist = [red, blue, green, yellow, lblue, purple, cyan, lred, lgreen, orange]
randomcolor = random.choice(colorlist)
banner3list = [red, blue, green, purple]
def helpbanner():
a = os.popen("ls commandlist -1 | wc -l").read()
b = a.replace('\n', '')
print """
╔══════════════════════════════════════════════════════════╗
║ ║
║ \033[92m ██░ ██ ▓█████ ██▓ ██▓███ \033[0m ║
║ \033[90m ▓██░ ██▒▓█ ▀ ▓██▒ ▓██░ ██▒ \033[0m ║
║ \033[92m ▒██▀▀██░▒███ ▒██░ ▓██░ ██▓▒ \033[0m ║
║ \033[90m ░▓█ ░██ ▒▓█ ▄ ▒██░ ▒██▄█▓▒ ▒ \033[0m ║
║ \033[92m ░▓█▒░██▓░▒████▒░██████▒▒██▒ ░ ░ \033[0m ║
║ \033[94m ▒ ░░▒░▒░░ ▒░ ░░ ▒░▓ ░▒▓▒░ ░ ░ \033[0m ║
║ \033[90m ▒ ░▒░ ░ ░ ░ ░░ ░ ▒ ░░▒ ░ \033[0m ║
║ \033[94m ░ ░░ ░ ░ ░ ░ ░░ \033[0m ║
║ \033[90m ░ ░ ░ ░ ░ ░ ░ \033[0m ║
║ ║
║══════════════════════════════════════════════════════════║
║ Commands: [\033[32m{0}\033[0m] Banners: [\033[31m6\033[0m] ║
║══════════════════════════════════════════════════════════════════════════════════════╗
║ ? | this menu ║
║ exit | exit htkl ║
║ clear | clears screen ║
║ banner | shows a banner ║
║ infoscan | gather information on a host [for a more specific scan type infoscan -o] ║
║ dos | run Denial-Of-Service attacks ║
║ ║
║ ║
║ \033[5m@tuf_unkn0wn\033[0m ║
╚══════════════════════════════════════════════════════════════════════════════════════╝
\033[0m\n""".format(b)
helpbanner()
|
bunkai/algorithm/lbd/custom_tokenizers.py | megagonlabs/bunkai | 149 | 12374 | #!/usr/bin/env python3
import collections
import logging
import os
import typing
import unicodedata
from janome.tokenizer import Tokenizer
from transformers.file_utils import cached_path
from transformers.models.bert.tokenization_bert import BertTokenizer, WordpieceTokenizer, load_vocab
import bunkai.constant
"""
The original source code is from cl-tohoku/bert-japanese.
https://github.com/cl-tohoku/bert-japanese/blob/master/tokenization.py
The original source code is under Apache-2.0 License.
"""
logger = logging.getLogger(__name__)
KNOWN_PRETRAINED_VOCABS = {
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
}
class JanomeTokenizer(object):
"""Runs basic tokenization with Janome morphological parser."""
def __init__(self, *, do_lower_case=False, never_split=None, normalize_text=True):
"""
Construct a JanomeTokenizer.
:arg do_lower_case: (`optional`) boolean (default True)
Whether to lower case the input.
:arg never_split: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
:arg normalize_text: (`optional`) boolean (default True)
Whether to apply unicode normalization to text before tokenization.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split if never_split is not None else []
self.normalize_text = normalize_text
self.janome_tokenizer = Tokenizer()
def tokenize(self, text: str, *, never_split=None, **kwargs):
"""Tokenizes a piece of text."""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
never_split = self.never_split + (never_split if never_split is not None else [])
tokens = self.janome_tokenizer.tokenize(text)
__tokens = []
last_index = 0
for t in tokens:
token = t.surface
token_start = text.index(token, last_index)
if last_index != token_start:
__tokens.append(text[last_index:token_start])
if self.do_lower_case and token not in never_split:
token = token.lower()
__tokens.append(token.lower())
else:
__tokens.append(token)
last_index = token_start + len(token)
if len(text) != last_index:
__tokens.append(text[last_index:])
assert text == "".join(__tokens), f"[{text}] != [{''.join(__tokens)}]"
return __tokens
class CharacterTokenizer(object):
"""Runs Character tokenziation."""
def __init__(self, vocab, unk_token, normalize_text=True):
self.vocab = vocab
self.unk_token = unk_token
self.normalize_text = normalize_text
def tokenize(self, text):
"""
Tokenize a piece of text into characters.
For example:
input = "apple"
output = ["a", "p", "p", "l", "e"]
:arg text: A single token or whitespace separated tokens.
This should have already been passed through `BasicTokenizer`.
:return: A list of characters.
"""
if self.normalize_text:
text = unicodedata.normalize("NFKC", text)
output_tokens = []
for char in text:
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
output_tokens.append(char)
return output_tokens
class JanomeSubwordsTokenizer(BertTokenizer):
def __init__(
self,
vocab_file,
*,
subword_tokenizer_type="wordpiece",
do_subword_tokenize: bool = True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs,
):
"""
Construct a JanomeSubwordsTokenizer.
:arg vocab_file: Path to a one-wordpiece-per-line vocabulary file.
:arg do_lower_case: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
:arg do_word_tokenize: (`optional`) boolean (default True) Whether to do word tokenization.
:arg do_subword_tokenize: (`optional`) boolean (default True) Whether to do subword tokenization.
:arg word_tokenizer_type: (`optional`) string (default "basic")
Type of word tokenizer. basic / janome / pre_tokenize
:arg subword_tokenizer_type: (`optional`) string (default "wordpiece") Type of subword tokenizer.
:arg cls_token: No description.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs,
)
if os.path.isfile(vocab_file):
self.vocab = load_vocab(vocab_file)
elif vocab_file in KNOWN_PRETRAINED_VOCABS:
url: str = f"https://s3.amazonaws.com/models.huggingface.co/bert/{vocab_file}/vocab.txt"
self.vocab = load_vocab(cached_path(url))
else:
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
# add new vocab
self.add_tokens([" ", bunkai.constant.METACHAR_LINE_BREAK])
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_word_tokenize = False
self.do_subword_tokenize = True
if do_subword_tokenize:
if subword_tokenizer_type == "wordpiece":
self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
elif subword_tokenizer_type == "character":
self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=self.unk_token)
else:
raise ValueError("Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type))
self.janome_tokenizer = JanomeTokenizer()
def tokenize(self, text: typing.Union[str, typing.List[str]]) -> typing.List[str]:
if isinstance(text, str):
morphemes = self.janome_tokenizer.tokenize(text)
elif isinstance(text, list) and all([isinstance(t, str) for t in text]):
morphemes = text
else:
raise Exception(f"Invalid input-type {text}")
if self.do_subword_tokenize:
split_tokens = []
for token in morphemes:
sts = [sub_token for sub_token in self.subword_tokenizer.tokenize(token)]
if len(sts) == 0:
split_tokens.append(token)
else:
split_tokens += sts
else:
split_tokens = morphemes
return split_tokens
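

# Editor's note: illustrative usage sketch, not part of the original Bunkai module.
# "cl-tohoku/bert-base-japanese-whole-word-masking" is one of the names resolved via
# KNOWN_PRETRAINED_VOCABS above; fetching its vocabulary requires network access.
if __name__ == "__main__":
    tokenizer = JanomeSubwordsTokenizer("cl-tohoku/bert-base-japanese-whole-word-masking")
    # Janome morphological analysis first, then WordPiece on each morpheme.
    print(tokenizer.tokenize("私は形態素解析が好きです。"))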
|
pythran/tests/rosetta/greatest_subsequential_sum.py | davidbrochart/pythran | 1,647 | 12391 |
#from http://rosettacode.org/wiki/Greatest_subsequential_sum#Python
#pythran export maxsum(int list)
#pythran export maxsumseq(int list)
#pythran export maxsumit(int list)
#runas maxsum([0, 1, 0])
#runas maxsumseq([-1, 2, -1, 3, -1])
#runas maxsumit([-1, 1, 2, -5, -6])
def maxsum(sequence):
"""Return maximum sum."""
maxsofar, maxendinghere = 0, 0
for x in sequence:
# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
maxendinghere = max(maxendinghere + x, 0)
maxsofar = max(maxsofar, maxendinghere)
return maxsofar
def maxsumseq(sequence):
start, end, sum_start = -1, -1, -1
maxsum_, sum_ = 0, 0
for i, x in enumerate(sequence):
sum_ += x
if maxsum_ < sum_: # found maximal subsequence so far
maxsum_ = sum_
start, end = sum_start, i
elif sum_ < 0: # start new sequence
sum_ = 0
sum_start = i
assert maxsum_ == maxsum(sequence)
assert maxsum_ == sum(sequence[start + 1:end + 1])
return sequence[start + 1:end + 1]
def maxsumit(iterable):
maxseq = seq = []
start, end, sum_start = -1, -1, -1
maxsum_, sum_ = 0, 0
for i, x in enumerate(iterable):
seq.append(x); sum_ += x
if maxsum_ < sum_:
maxseq = seq; maxsum_ = sum_
start, end = sum_start, i
elif sum_ < 0:
seq = []; sum_ = 0
sum_start = i
assert maxsum_ == sum(maxseq[:end - start])
return maxseq[:end - start]
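# Worked example (illustrative comment only): for the #runas input
# [-1, 2, -1, 3, -1], maxsofar in maxsum evolves 0, 2, 2, 4, 4, so all three
# functions agree the greatest subsequential sum is 4, realised by [2, -1, 3].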
|
test/functional/test_framework/script_util.py | TopoX84/newlux | 1,389 | 12403 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
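# Sanity check of the arithmetic above (comment only): CScript([b'a' * 21])
# serializes as a one-byte push opcode (0x15) followed by the 21 payload bytes,
# so len(DUMMY_P2WPKH_SCRIPT) == 22, and 60 + 22 == 82 matches
# MIN_STANDARD_TX_NONWITNESS_SIZE.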
|
tests/error/test_format_error.py | GDGSNF/graphql-core | 590 | 12422 | <gh_stars>100-1000
from typing import List, Union
from pytest import raises
from graphql.error import GraphQLError, format_error
from graphql.language import Node, Source
from graphql.pyutils import Undefined
def describe_format_error():
def formats_graphql_error():
source = Source(
"""
query {
something
}"""
)
path: List[Union[int, str]] = ["one", 2]
extensions = {"ext": None}
error = GraphQLError(
"test message",
Node(),
source,
[14, 40],
path,
ValueError("original"),
extensions=extensions,
)
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {
"message": "test message",
"locations": [{"line": 2, "column": 14}, {"line": 3, "column": 20}],
"path": path,
"extensions": extensions,
}
def uses_default_message():
# noinspection PyTypeChecker
formatted = format_error(GraphQLError(None)) # type: ignore
assert formatted == {
"message": "An unknown error occurred.",
"locations": None,
"path": None,
}
def includes_path():
path: List[Union[int, str]] = ["path", 3, "to", "field"]
error = GraphQLError("msg", path=path)
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {"message": "msg", "locations": None, "path": path}
def includes_extension_fields():
error = GraphQLError("msg", extensions={"foo": "bar"})
formatted = format_error(error)
assert formatted == error.formatted
assert formatted == {
"message": "msg",
"locations": None,
"path": None,
"extensions": {"foo": "bar"},
}
def rejects_none_and_undefined_errors():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
format_error(None) # type: ignore
assert str(exc_info.value) == "Expected a GraphQLError."
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
format_error(Undefined) # type: ignore
assert str(exc_info.value) == "Expected a GraphQLError."
|
dvrip.py | jackkum/python-dvr | 149 | 12432 | <filename>dvrip.py
import os
import struct
import json
from time import sleep
import hashlib
import threading
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM
from datetime import *
from re import compile
import time
import logging
class SomethingIsWrongWithCamera(Exception):
pass
class DVRIPCam(object):
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
CODES = {
100: "OK",
101: "Unknown error",
102: "Unsupported version",
103: "Request not permitted",
104: "User already logged in",
105: "User is not logged in",
106: "Username or password is incorrect",
107: "User does not have necessary permissions",
203: "Password is incorrect",
511: "Start of upgrade",
512: "Upgrade was not started",
513: "Upgrade data errors",
514: "Upgrade error",
515: "Upgrade successful",
}
QCODES = {
"AuthorityList": 1470,
"Users": 1472,
"Groups": 1474,
"AddGroup": 1476,
"ModifyGroup": 1478,
"DelGroup": 1480,
"AddUser": 1482,
"ModifyUser": 1484,
"DelUser": 1486,
"ModifyPassword": <PASSWORD>,
"AlarmInfo": 1504,
"AlarmSet": 1500,
"ChannelTitle": 1046,
"EncodeCapability": 1360,
"General": 1042,
"KeepAlive": 1006,
"OPMachine": 1450,
"OPMailTest": 1636,
"OPMonitor": 1413,
"OPNetKeyboard": 1550,
"OPPTZControl": 1400,
"OPSNAP": 1560,
"OPSendFile": 0x5F2,
"OPSystemUpgrade": 0x5F5,
"OPTalk": 1434,
"OPTimeQuery": 1452,
"OPTimeSetting": 1450,
"NetWork.NetCommon": 1042,
"OPNetAlarm": 1506,
"SystemFunction": 1360,
"SystemInfo": 1020,
}
KEY_CODES = {
"M": "Menu",
"I": "Info",
"E": "Esc",
"F": "Func",
"S": "Shift",
"L": "Left",
"U": "Up",
"R": "Right",
"D": "Down",
}
OK_CODES = [100, 515]
PORTS = {
"tcp": 34567,
"udp": 34568,
}
def __init__(self, ip, **kwargs):
self.logger = logging.getLogger(__name__)
self.ip = ip
self.user = kwargs.get("user", "admin")
hash_pass = kwargs.get("hash_pass")
self.hash_pass = kwargs.get("hash_pass", self.sofia_hash(kwargs.get("password", "")))
self.proto = kwargs.get("proto", "tcp")
self.port = kwargs.get("port", self.PORTS.get(self.proto))
self.socket = None
self.packet_count = 0
self.session = 0
self.alive_time = 20
self.alive = None
self.alarm = None
self.alarm_func = None
self.busy = threading.Condition()
def debug(self, format=None):
self.logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
if format:
formatter = logging.Formatter(format)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def connect(self, timeout=10):
try:
if self.proto == "tcp":
self.socket_send = self.tcp_socket_send
self.socket_recv = self.tcp_socket_recv
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.connect((self.ip, self.port))
elif self.proto == "udp":
self.socket_send = self.udp_socket_send
self.socket_recv = self.udp_socket_recv
self.socket = socket(AF_INET, SOCK_DGRAM)
else:
raise f"Unsupported protocol {self.proto}"
# it's important to extend timeout for upgrade procedure
self.timeout = timeout
self.socket.settimeout(timeout)
except OSError:
raise SomethingIsWrongWithCamera('Cannot connect to camera')
def close(self):
try:
self.alive.cancel()
self.socket.close()
except:
pass
self.socket = None
def udp_socket_send(self, bytes):
return self.socket.sendto(bytes, (self.ip, self.port))
def udp_socket_recv(self, bytes):
data, _ = self.socket.recvfrom(bytes)
return data
def tcp_socket_send(self, bytes):
try:
return self.socket.sendall(bytes)
except:
return None
def tcp_socket_recv(self, bufsize):
try:
return self.socket.recv(bufsize)
except:
return None
def receive_with_timeout(self, length):
received = 0
buf = bytearray()
start_time = time.time()
while True:
data = self.socket_recv(length - received)
buf.extend(data)
received += len(data)
if length == received:
break
elapsed_time = time.time() - start_time
if elapsed_time > self.timeout:
return None
return buf
def receive_json(self, length):
data = self.receive_with_timeout(length)
if data is None:
return {}
self.packet_count += 1
self.logger.debug("<= %s", data)
reply = json.loads(data[:-2])
return reply
def send(self, msg, data={}, wait_response=True):
if self.socket is None:
return {"Ret": 101}
# self.busy.wait()
self.busy.acquire()
if hasattr(data, "__iter__"):
data = bytes(json.dumps(data, ensure_ascii=False), "utf-8")
pkt = (
struct.pack(
"BB2xII2xHI",
255,
0,
self.session,
self.packet_count,
msg,
len(data) + 2,
)
+ data
+ b"\x0a\x00"
)
self.logger.debug("=> %s", pkt)
self.socket_send(pkt)
if wait_response:
reply = {"Ret": 101}
data = self.socket_recv(20)
if data is None or len(data) < 20:
return None
(
head,
version,
self.session,
sequence_number,
msgid,
len_data,
) = struct.unpack("BB2xII2xHI", data)
reply = self.receive_json(len_data)
self.busy.release()
return reply
def sofia_hash(self, password=""):
md5 = hashlib.md5(bytes(password, "utf-8")).digest()
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
return "".join([chars[sum(x) % 62] for x in zip(md5[::2], md5[1::2])])
def login(self):
if self.socket is None:
self.connect()
data = self.send(
1000,
{
"EncryptType": "MD5",
"LoginType": "DVRIP-Web",
"PassWord": <PASSWORD>,
"UserName": self.user,
},
)
if data is None or data["Ret"] not in self.OK_CODES:
return False
self.session = int(data["SessionID"], 16)
self.alive_time = data["AliveInterval"]
self.keep_alive()
return data["Ret"] in self.OK_CODES
def getAuthorityList(self):
data = self.send(self.QCODES["AuthorityList"])
if data["Ret"] in self.OK_CODES:
return data["AuthorityList"]
else:
return []
def getGroups(self):
data = self.send(self.QCODES["Groups"])
if data["Ret"] in self.OK_CODES:
return data["Groups"]
else:
return []
def addGroup(self, name, comment="", auth=None):
data = self.set_command(
"AddGroup",
{
"Group": {
"AuthorityList": auth or self.getAuthorityList(),
"Memo": comment,
"Name": name,
},
},
)
return data["Ret"] in self.OK_CODES
def modifyGroup(self, name, newname=None, comment=None, auth=None):
g = [x for x in self.getGroups() if x["Name"] == name]
if g == []:
print(f'Group "{name}" not found!')
return False
g = g[0]
data = self.send(
self.QCODES["ModifyGroup"],
{
"Group": {
"AuthorityList": auth or g["AuthorityList"],
"Memo": comment or g["Memo"],
"Name": newname or g["Name"],
},
"GroupName": name,
},
)
return data["Ret"] in self.OK_CODES
def delGroup(self, name):
data = self.send(
self.QCODES["DelGroup"],
{"Name": name, "SessionID": "0x%08X" % self.session,},
)
return data["Ret"] in self.OK_CODES
def getUsers(self):
data = self.send(self.QCODES["Users"])
if data["Ret"] in self.OK_CODES:
return data["Users"]
else:
return []
def addUser(
self, name, password, comment="", group="user", auth=None, sharable=True
):
g = [x for x in self.getGroups() if x["Name"] == group]
if g == []:
print(f'Group "{group}" not found!')
return False
g = g[0]
data = self.set_command(
"AddUser",
{
"User": {
"AuthorityList": auth or g["AuthorityList"],
"Group": g["Name"],
"Memo": comment,
"Name": name,
"Password": self.sofia_hash(password),
"Reserved": False,
"Sharable": sharable,
},
},
)
return data["Ret"] in self.OK_CODES
def modifyUser(
self, name, newname=None, comment=None, group=None, auth=None, sharable=None
):
u = [x for x in self.getUsers() if x["Name"] == name]
if u == []:
print(f'User "{name}" not found!')
return False
u = u[0]
if group:
g = [x for x in self.getGroups() if x["Name"] == group]
if g == []:
print(f'Group "{group}" not found!')
return False
u["AuthorityList"] = g[0]["AuthorityList"]
data = self.send(
self.QCODES["ModifyUser"],
{
"User": {
"AuthorityList": auth or u["AuthorityList"],
"Group": group or u["Group"],
"Memo": comment or u["Memo"],
"Name": newname or u["Name"],
"Password": "",
"Reserved": u["Reserved"],
"Sharable": sharable or u["Sharable"],
},
"UserName": name,
},
)
return data["Ret"] in self.OK_CODES
def delUser(self, name):
data = self.send(
self.QCODES["DelUser"],
{"Name": name, "SessionID": "0x%08X" % self.session,},
)
return data["Ret"] in self.OK_CODES
def changePasswd(self, newpass="", oldpass=None, user=None):
data = self.send(
self.QCODES["ModifyPassword"],
{
"EncryptType": "MD5",
"NewPassWord": self.sofia_hash(newpass),
"PassWord": oldpass or <PASSWORD>,
"SessionID": "0x%08X" % self.session,
"UserName": user or self.user,
},
)
return data["Ret"] in self.OK_CODES
def channel_title(self, titles):
if isinstance(titles, str):
titles = [titles]
self.send(
self.QCODES["ChannelTitle"],
{
"ChannelTitle": titles,
"Name": "ChannelTitle",
"SessionID": "0x%08X" % self.session,
},
)
def channel_bitmap(self, width, height, bitmap):
header = struct.pack("HH12x", width, height)
self.socket_send(
struct.pack(
"BB2xII2xHI",
255,
0,
self.session,
self.packet_count,
0x041A,
len(bitmap) + 16,
)
+ header
+ bitmap
)
reply, rcvd = self.recv_json()
if reply and reply["Ret"] != 100:
return False
return True
def reboot(self):
self.set_command("OPMachine", {"Action": "Reboot"})
self.close()
def setAlarm(self, func):
self.alarm_func = func
def clearAlarm(self):
self.alarm_func = None
def alarmStart(self):
self.alarm = threading.Thread(
name="DVRAlarm%08X" % self.session,
target=self.alarm_thread,
args=[self.busy],
)
self.alarm.start()
return self.get_command("", self.QCODES["AlarmSet"])
def alarm_thread(self, event):
while True:
event.acquire()
try:
(
head,
version,
session,
sequence_number,
msgid,
len_data,
) = struct.unpack("BB2xII2xHI", self.socket_recv(20))
sleep(0.1) # Just for receive whole packet
reply = self.socket_recv(len_data)
self.packet_count += 1
reply = json.loads(reply[:-2])
if msgid == self.QCODES["AlarmInfo"] and self.session == session:
if self.alarm_func is not None:
self.alarm_func(reply[reply["Name"]], sequence_number)
except:
pass
finally:
event.release()
if self.socket is None:
break
def set_remote_alarm(self, state):
self.set_command(
"OPNetAlarm", {"Event": 0, "State": state},
)
def keep_alive(self):
ret = self.send(
self.QCODES["KeepAlive"],
{"Name": "KeepAlive", "SessionID": "0x%08X" % self.session},
)
if ret is None:
self.close()
return
self.alive = threading.Timer(self.alive_time, self.keep_alive)
self.alive.daemon = True
self.alive.start()
def keyDown(self, key):
self.set_command(
"OPNetKeyboard", {"Status": "KeyDown", "Value": key},
)
def keyUp(self, key):
self.set_command(
"OPNetKeyboard", {"Status": "KeyUp", "Value": key},
)
def keyPress(self, key):
self.keyDown(key)
sleep(0.3)
self.keyUp(key)
def keyScript(self, keys):
for k in keys:
if k != " " and k.upper() in self.KEY_CODES:
self.keyPress(self.KEY_CODES[k.upper()])
else:
sleep(1)
def ptz(self, cmd, step=5, preset=-1, ch=0):
CMDS = [
"DirectionUp",
"DirectionDown",
"DirectionLeft",
"DirectionRight",
"DirectionLeftUp",
"DirectionLeftDown",
"DirectionRightUp",
"DirectionRightDown",
"ZoomTile",
"ZoomWide",
"FocusNear",
"FocusFar",
"IrisSmall",
"IrisLarge",
"SetPreset",
"GotoPreset",
"ClearPreset",
"StartTour",
"StopTour",
]
# ptz_param = { "AUX" : { "Number" : 0, "Status" : "On" }, "Channel" : ch, "MenuOpts" : "Enter", "POINT" : { "bottom" : 0, "left" : 0, "right" : 0, "top" : 0 }, "Pattern" : "SetBegin", "Preset" : -1, "Step" : 5, "Tour" : 0 }
ptz_param = {
"AUX": {"Number": 0, "Status": "On"},
"Channel": ch,
"MenuOpts": "Enter",
"Pattern": "Start",
"Preset": preset,
"Step": step,
"Tour": 1 if "Tour" in cmd else 0,
}
return self.set_command(
"OPPTZControl", {"Command": cmd, "Parameter": ptz_param},
)
def set_info(self, command, data):
return self.set_command(command, data, 1040)
def set_command(self, command, data, code=None):
if not code:
code = self.QCODES[command]
return self.send(
code, {"Name": command, "SessionID": "0x%08X" % self.session, command: data}
)
def get_info(self, command):
return self.get_command(command, 1042)
def get_command(self, command, code=None):
if not code:
code = self.QCODES[command]
data = self.send(code, {"Name": command, "SessionID": "0x%08X" % self.session})
if data["Ret"] in self.OK_CODES and command in data:
return data[command]
else:
return data
def get_time(self):
return datetime.strptime(self.get_command("OPTimeQuery"), self.DATE_FORMAT)
def set_time(self, time=None):
if time is None:
time = datetime.now()
return self.set_command("OPTimeSetting", time.strftime(self.DATE_FORMAT))
def get_netcommon(self):
return self.get_command("NetWork.NetCommon")
def get_system_info(self):
return self.get_command("SystemInfo")
def get_general_info(self):
return self.get_command("General")
def get_encode_capabilities(self):
return self.get_command("EncodeCapability")
def get_system_capabilities(self):
return self.get_command("SystemFunction")
def get_camera_info(self, default_config=False):
"""Request data for 'Camera' from the target DVRIP device."""
if default_config:
code = 1044
else:
code = 1042
return self.get_command("Camera", code)
def get_encode_info(self, default_config=False):
"""Request data for 'Simplify.Encode' from the target DVRIP device.
Arguments:
default_config -- returns the default values for the type if True
"""
if default_config:
code = 1044
else:
code = 1042
return self.get_command("Simplify.Encode", code)
def recv_json(self, buf=bytearray()):
p = compile(b".*({.*})")
packet = self.socket_recv(0xFFFF)
if not packet:
return None, buf
buf.extend(packet)
m = p.search(buf)
if m is None:
return None, buf
buf = buf[m.span(1)[1] :]
return json.loads(m.group(1)), buf
def get_upgrade_info(self):
return self.get_command("OPSystemUpgrade")
def upgrade(self, filename="", packetsize=0x8000, vprint=None):
if not vprint:
vprint = lambda x: print(x)
data = self.set_command(
"OPSystemUpgrade", {"Action": "Start", "Type": "System"}, 0x5F0
)
if data["Ret"] not in self.OK_CODES:
return data
vprint("Ready to upgrade")
blocknum = 0
sentbytes = 0
fsize = os.stat(filename).st_size
rcvd = bytearray()
with open(filename, "rb") as f:
while True:
bytes = f.read(packetsize)
if not bytes:
break
header = struct.pack(
"BB2xII2xHI", 255, 0, self.session, blocknum, 0x5F2, len(bytes)
)
self.socket_send(header + bytes)
blocknum += 1
sentbytes += len(bytes)
reply, rcvd = self.recv_json(rcvd)
if reply and reply["Ret"] != 100:
vprint("Upgrade failed")
return reply
progress = sentbytes / fsize * 100
vprint(f"Uploaded {progress:.2f}%")
vprint("End of file")
pkt = struct.pack("BB2xIIxBHI", 255, 0, self.session, blocknum, 1, 0x05F2, 0)
self.socket_send(pkt)
vprint("Waiting for upgrade...")
while True:
reply, rcvd = self.recv_json(rcvd)
print(reply)
if not reply:
return
if reply["Name"] == "" and reply["Ret"] == 100:
break
while True:
data, rcvd = self.recv_json(rcvd)
print(reply)
if data is None:
vprint("Done")
return
if data["Ret"] in [512, 514, 513]:
vprint("Upgrade failed")
return data
if data["Ret"] == 515:
vprint("Upgrade successful")
self.socket.close()
return data
vprint(f"Upgraded {data['Ret']}%")
def reassemble_bin_payload(self, metadata={}):
def internal_to_type(data_type, value):
if data_type == 0x1FC or data_type == 0x1FD:
if value == 1:
return "mpeg4"
elif value == 2:
return "h264"
elif value == 3:
return "h265"
elif data_type == 0x1F9:
if value == 1 or value == 6:
return "info"
elif data_type == 0x1FA:
if value == 0xE:
return "g711a"
elif data_type == 0x1FE and value == 0:
return "jpeg"
return None
def internal_to_datetime(value):
second = value & 0x3F
minute = (value & 0xFC0) >> 6
hour = (value & 0x1F000) >> 12
day = (value & 0x3E0000) >> 17
month = (value & 0x3C00000) >> 22
year = ((value & 0xFC000000) >> 26) + 2000
return datetime(year, month, day, hour, minute, second)
length = 0
buf = bytearray()
start_time = time.time()
while True:
data = self.receive_with_timeout(20)
(
head,
version,
session,
sequence_number,
total,
cur,
msgid,
len_data,
) = struct.unpack("BB2xIIBBHI", data)
packet = self.receive_with_timeout(len_data)
frame_len = 0
if length == 0:
media = None
frame_len = 8
(data_type,) = struct.unpack(">I", packet[:4])
if data_type == 0x1FC or data_type == 0x1FE:
frame_len = 16
(media, metadata["fps"], w, h, dt, length,) = struct.unpack(
"BBBBII", packet[4:frame_len]
)
metadata["width"] = w * 8
metadata["height"] = h * 8
metadata["datetime"] = internal_to_datetime(dt)
if data_type == 0x1FC:
metadata["frame"] = "I"
elif data_type == 0x1FD:
(length,) = struct.unpack("I", packet[4:frame_len])
metadata["frame"] = "P"
elif data_type == 0x1FA:
(media, samp_rate, length) = struct.unpack(
"BBH", packet[4:frame_len]
)
elif data_type == 0x1F9:
(media, n, length) = struct.unpack("BBH", packet[4:frame_len])
# special case of JPEG shapshots
elif data_type == 0xFFD8FFE0:
return packet
else:
raise ValueError(data_type)
if media is not None:
metadata["type"] = internal_to_type(data_type, media)
buf.extend(packet[frame_len:])
length -= len(packet) - frame_len
if length == 0:
return buf
elapsed_time = time.time() - start_time
if elapsed_time > self.timeout:
return None
def snapshot(self, channel=0):
command = "OPSNAP"
self.send(
self.QCODES[command],
{
"Name": command,
"SessionID": "0x%08X" % self.session,
command: {"Channel": channel},
},
wait_response=False,
)
packet = self.reassemble_bin_payload()
return packet
def start_monitor(self, frame_callback, user={}, stream="Main"):
params = {
"Channel": 0,
"CombinMode": "NONE",
"StreamType": stream,
"TransMode": "TCP",
}
data = self.set_command("OPMonitor", {"Action": "Claim", "Parameter": params})
if data["Ret"] not in self.OK_CODES:
return data
self.send(
1410,
{
"Name": "OPMonitor",
"SessionID": "0x%08X" % self.session,
"OPMonitor": {"Action": "Start", "Parameter": params},
},
wait_response=False,
)
self.monitoring = True
while self.monitoring:
meta = {}
frame = self.reassemble_bin_payload(meta)
frame_callback(frame, meta, user)
def stop_monitor(self):
self.monitoring = False
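# Minimal usage sketch (not part of the library). The IP address and
# credentials below are placeholders and must be replaced with real values.
if __name__ == "__main__":
    cam = DVRIPCam("192.168.1.10", user="admin", password="")
    if cam.login():
        print("Camera time:", cam.get_time())
        with open("snapshot.jpg", "wb") as f:
            f.write(cam.snapshot())
        cam.close()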
|
tests/test_renderer.py | 0xflotus/maildown | 626 | 12433 | <filename>tests/test_renderer.py
import mock
from maildown import renderer
import mistune
import pygments
from pygments import lexers
from pygments.formatters import html
import premailer
import jinja2
def test_highlight_renderer(monkeypatch):
monkeypatch.setattr(mistune, "escape", mock.MagicMock())
monkeypatch.setattr(lexers, "get_lexer_by_name", mock.MagicMock())
monkeypatch.setattr(html, "HtmlFormatter", mock.MagicMock())
monkeypatch.setattr(pygments, "highlight", mock.MagicMock())
lexers.get_lexer_by_name.return_value = True
html.HtmlFormatter.return_value = {}
r = renderer.HighlightRenderer()
r.block_code("code")
mistune.escape.assert_called_with("code")
r.block_code("code", "python")
lexers.get_lexer_by_name.assert_called_with("python", stripall=True)
pygments.highlight.assert_called_with("code", True, {})
def test_generate_content(monkeypatch):
monkeypatch.setattr(mistune, "Markdown", mock.MagicMock())
monkeypatch.setattr(premailer, "transform", mock.MagicMock())
monkeypatch.setattr(renderer, "HighlightRenderer", mock.MagicMock())
monkeypatch.setattr(jinja2, "Template", mock.MagicMock())
renderer.HighlightRenderer.return_value = 1
premailer.transform.return_value = ""
jinja2.Template.render.return_value = ""
renderer.generate_content("")
mistune.Markdown.assert_called_with(renderer=1)
|
source1/bsp/entities/portal2_entity_handlers.py | tltneon/SourceIO | 199 | 12443 | import math
from mathutils import Euler
import bpy
from .portal2_entity_classes import *
from .portal_entity_handlers import PortalEntityHandler
local_entity_lookup_table = PortalEntityHandler.entity_lookup_table.copy()
local_entity_lookup_table.update(entity_class_handle)
class Portal2EntityHandler(PortalEntityHandler):
entity_lookup_table = local_entity_lookup_table
pointlight_power_multiplier = 1000
def handle_prop_weighted_cube(self, entity: prop_weighted_cube, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_weighted_cube', obj, 'props')
def handle_prop_testchamber_door(self, entity: prop_testchamber_door, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_testchamber_door', obj, 'props')
def handle_prop_floor_button(self, entity: prop_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_button', obj, 'props')
def handle_prop_floor_ball_button(self, entity: prop_floor_ball_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_ball_button', obj, 'props')
def handle_prop_floor_cube_button(self, entity: prop_floor_cube_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_cube_button', obj, 'props')
def handle_prop_under_floor_button(self, entity: prop_under_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_under_floor_button', obj, 'props')
def handle_prop_tractor_beam(self, entity: prop_tractor_beam, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_tractor_beam', obj, 'props')
def handle_logic_playmovie(self, entity: logic_playmovie, entity_raw: dict):
obj = bpy.data.objects.new(self._get_entity_name(entity), None)
self._set_location(obj, entity.origin)
self._set_icon_if_present(obj, entity)
self._set_entity_data(obj, {'entity': entity_raw})
self._put_into_collection('logic_playmovie', obj, 'logic')
def handle_trigger_paint_cleanser(self, entity: trigger_paint_cleanser, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_paint_cleanser', mesh_object, 'triggers')
def handle_trigger_catapult(self, entity: trigger_catapult, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_catapult', mesh_object, 'triggers')
def handle_npc_wheatley_boss(self, entity: npc_wheatley_boss, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('npc_wheatley_boss', obj, 'npc')
def handle_prop_exploding_futbol(self, entity: prop_exploding_futbol, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol', obj, 'props')
def handle_prop_exploding_futbol_socket(self, entity: prop_exploding_futbol_socket, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol_socket', obj, 'props')
def handle_prop_exploding_futbol_spawner(self, entity: prop_exploding_futbol_spawner, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol_spawner', obj, 'props')
|
comicstreamerlib/gui_qt.py | rlugojr/ComicStreamer | 169 | 12454 | import sys
import webbrowser
import os
from comicstreamerlib.folders import AppFolders
from PyQt4 import QtGui,QtCore
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, app):
QtGui.QSystemTrayIcon.__init__(self, icon, None)
self.app = app
self.menu = QtGui.QMenu(None)
exitAction = self.menu.addAction("Exit")
self.setContextMenu(self.menu)
exitAction.triggered.connect( self.quit )
def quit(self):
QtCore.QCoreApplication.quit()
class QtBasedGui():
def __init__(self, apiServer):
self.apiServer = apiServer
self.app = QtGui.QApplication(sys.argv)
pixmap = QtGui.QPixmap(AppFolders.imagePath("trout.png"))
icon = QtGui.QIcon( pixmap.scaled(16,16))
self.trayIcon = SystemTrayIcon(icon,self)
self.trayIcon.show()
def run(self):
try:
self.app.exec_()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
QtBasedGui(None).run()
|
Python/Interfacing_C_C++_Fortran/F2py/comp_pi_f2py.py | Gjacquenot/training-material | 115 | 12482 | #!/usr/bin/env python
from argparse import ArgumentParser
import sys
from comp_pi import compute_pi
def main():
arg_parser = ArgumentParser(description='compute pi using Fortran '
'function')
arg_parser.add_argument('n', type=int, default=1000, nargs='?',
help='number of random points')
options = arg_parser.parse_args()
print(compute_pi(options.n))
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
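# The compiled `comp_pi` extension imported above is assumed to be built ahead
# of time with f2py, along the lines of (the Fortran source file name is an
# assumption):
#   f2py -c comp_pi.f90 -m comp_pi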
|
dqn/dqn_noisy_networks/model.py | AgentMaker/Paddle-RLBooks | 127 | 12485 | <filename>dqn/dqn_noisy_networks/model.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Assign
import math
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features)
sigma_init = sigma_zero / math.sqrt(in_features)
sigma_weight = self.create_parameter(
shape=[in_features, out_features],
default_initializer=Assign(
paddle.full((in_features, out_features), sigma_init)
)
)
self.add_parameter("sigma_weight", sigma_weight)
self.register_buffer("epsilon_input", paddle.zeros((1, in_features)))
self.register_buffer("epsilon_output", paddle.zeros((out_features, 1)))
if bias:
sigma_bias = self.create_parameter(
shape=[out_features],
default_initializer=Assign(
paddle.full([out_features], sigma_init)
)
)
self.add_parameter("sigma_bias", sigma_bias)
def _scale_noise(self, shape):
x = paddle.randn(shape)
return x.sign().multiply(x.abs().sqrt())
def forward(self, inputs):
with paddle.no_grad():
eps_in = self._scale_noise(self.epsilon_input.shape)
eps_out = self._scale_noise(self.epsilon_output.shape)
noise_v = paddle.multiply(eps_in, eps_out).detach()
return F.linear(inputs, self.weight + self.sigma_weight * noise_v.t(), self.bias + self.sigma_bias * eps_out.squeeze().t())
class Model(nn.Layer):
def __init__(self, num_inputs, num_actions):
super(Model, self).__init__()
self.conv1 = nn.Conv2D(num_inputs, 32, 3, stride=3)
self.conv2 = nn.Conv2D(32, 32, 3, stride=3)
self.conv3 = nn.Conv2D(32, 64, 3, stride=1)
self.flatten = nn.Flatten()
self.linear = NoisyLinear(64 * 3 * 2, 256)
self.fc = NoisyLinear(256, num_actions)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.flatten(x)
x = self.linear(x)
return self.fc(x)
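# Minimal sketch of the noisy layer in isolation (arbitrary shapes): two
# forward passes over the same input differ because fresh factorised Gaussian
# noise is drawn on every call, which is what replaces epsilon-greedy
# exploration in noisy networks.
if __name__ == '__main__':
    layer = NoisyLinear(4, 2)
    x = paddle.randn([8, 4])
    y1, y2 = layer(x), layer(x)
    print(paddle.allclose(y1, y2))  # almost surely False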
|
src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 290 | 12538 | <reponame>AbhinavGopal/ts_tutorial
"""Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
class TwoLayerNNEpsilonGreedy(Agent):
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
epsilon_param=0.0,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Epsilon-greedy agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
epsilon_param: fixed epsilon choice.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim) # initialize weights
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.epsilon_param = epsilon_param
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps # number of gradient steps we
# take during each time period
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, t):
"""Update the model by taking a few gradient steps."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.reward_hist[batch_ind]
out, cache = self._model_forward(action_batch)
dW1, dW2 = self._model_backward(out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * self.W1
dW2 += 2 / (self.prior_var * (t + 1)) * self.W2
self.W1 -= self.lr * dW1
self.W2 -= self.lr * dW2
def update_observation(self, observation, action, reward):
"""Learn from observations."""
t = observation
self.action_hist[t] = self.actions[action]
self.reward_hist[t] = reward
self._update_model(t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Fixed epsilon-greedy action selection."""
u = rd.rand()
if u < self.epsilon_param:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEpsilonGreedyAnnealing(TwoLayerNNEpsilonGreedy):
"""Epsilon-greedy with an annealing epsilon:
epsilon = self.epsilon_param / (self.epsilon_param + t)
"""
def pick_action(self, observation):
"""Overload pick_action to dynamically recalculate epsilon-greedy."""
t = observation
epsilon = self.epsilon_param / (self.epsilon_param + t)
u = rd.rand()
if u < epsilon:
action = rd.randint(self.num_actions)
else:
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNDropout(TwoLayerNNEpsilonGreedy):
"""Dropout is used to represent model uncertainty.
ICML paper suggests this is Bayesian uncertainty: arXiv:1506.02142.
Follow up work suggests that this is flawed: TODO(iosband) add link.
"""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
drop_prob=0.5,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Dropout agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
drop_prob: probability of randomly zero-ing out weight component.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.W1 = 1e-2 * rd.randn(hidden_dim, input_dim)
self.W2 = 1e-2 * rd.randn(hidden_dim)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.p = drop_prob
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.reward_hist = np.zeros(self.T)
def _model_forward(self, input_actions):
"""Neural network forward pass.
Note that dropout remains "on" so that the forward pass is stochastic.
Args:
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1, axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
dropout_mask = rd.rand(*relu_out.shape) > self.p
dropout_out = relu_out * dropout_mask
out = np.sum(dropout_out * self.W2, axis=1)
cache = (input_actions, affine_out, relu_out, dropout_mask, dropout_out)
return out, cache
def _model_backward(self, out, cache, y):
"""Neural network backward pass (for backpropagation).
Args:
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out, dropout_mask, dropout_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
ddropout_out = dout[:, np.newaxis] * self.W2
drelu_out = ddropout_out * dropout_mask
relu_mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = relu_mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def pick_action(self, observation):
"""Select the greedy action according to the output of a stochastic
forward pass."""
model_out, _ = self._model_forward(self.actions)
action = np.argmax(model_out)
return action
class TwoLayerNNEnsembleSampling(Agent):
"""An ensemble sampling agent maintains an ensemble of neural nets, each
fitted to a perturbed prior and perturbed observations."""
def __init__(self,
input_dim,
hidden_dim,
actions,
time_horizon,
prior_var,
noise_var,
num_models=10,
learning_rate=1e-1,
num_gradient_steps=1,
batch_size=64,
lr_decay=1,
leaky_coeff=0.01):
"""Ensemble sampling agent with two-layer neural network model.
Args:
input_dim: int dimension of input.
hidden_dim: int size of hidden layer.
actions: numpy array of valid actions (generated by environment).
time_horizon: int size to pre-allocate data storage.
prior_var: prior variance for random initialization.
noise_var: noise variance for update.
num_models: Number of ensemble models to train.
learning_rate: sgd learning rate.
num_gradient_steps: how many sgd to do.
batch_size: size of batch.
lr_decay: decay learning rate.
leaky_coeff: slope of "negative" part of the Leaky ReLU.
"""
self.M = num_models
# initialize models by sampling perturbed prior means
self.W1_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim,
input_dim)
self.W2_model_prior = np.sqrt(prior_var) * rd.randn(self.M, hidden_dim)
self.W1 = np.copy(self.W1_model_prior)
self.W2 = np.copy(self.W2_model_prior)
self.actions = actions
self.num_actions = len(actions)
self.T = time_horizon
self.prior_var = prior_var
self.noise_var = noise_var
self.lr = learning_rate
self.num_gradient_steps = num_gradient_steps
self.batch_size = batch_size
self.lr_decay = lr_decay
self.leaky_coeff = leaky_coeff
self.action_hist = np.zeros((self.T, input_dim))
self.model_reward_hist = np.zeros((self.M, self.T))
def _model_forward(self, m, input_actions):
"""Neural network forward pass for single model of ensemble.
Args:
m: index of which network to evaluate.
input_actions: actions to evaluate (numpy array).
Returns:
out: network prediction.
cache: tuple holding intermediate activations for backprop.
"""
affine_out = np.sum(input_actions[:, np.newaxis, :] * self.W1[m], axis=2)
relu_out = np.maximum(self.leaky_coeff * affine_out, affine_out)
out = np.sum(relu_out * self.W2[m], axis=1)
cache = (input_actions, affine_out, relu_out)
return out, cache
def _model_backward(self, m, out, cache, y):
"""Neural network backward pass (for backpropagation) for single network.
Args:
m: index of which network to evaluate.
out: output of batch of predictions.
cache: intermediate activations from _model_forward.
y: target labels.
Returns:
dW1: gradients for layer 1.
dW2: gradients for layer 2.
"""
input_actions, affine_out, relu_out = cache
dout = -(2 / self.noise_var) * (y - out)
dW2 = np.sum(dout[:, np.newaxis] * relu_out, axis=0)
drelu_out = dout[:, np.newaxis] * self.W2[m]
mask = (affine_out >= 0) + self.leaky_coeff * (affine_out < 0)
daffine_out = mask * drelu_out
dW1 = np.dot(daffine_out.T, input_actions)
return dW1, dW2
def _update_model(self, m, t):
"""Apply SGD to model m."""
for i in range(self.num_gradient_steps):
# sample minibatch
batch_ind = rd.randint(t + 1, size=self.batch_size)
action_batch = self.action_hist[batch_ind]
reward_batch = self.model_reward_hist[m][batch_ind]
out, cache = self._model_forward(m, action_batch)
dW1, dW2 = self._model_backward(m, out, cache, reward_batch)
dW1 /= self.batch_size
dW2 /= self.batch_size
dW1 += 2 / (self.prior_var * (t + 1)) * (
self.W1[m] - self.W1_model_prior[m])
dW2 += 2 / (self.prior_var * (t + 1)) * (
self.W2[m] - self.W2_model_prior[m])
self.W1[m] -= self.lr * dW1
self.W2[m] -= self.lr * dW2
return
def update_observation(self, observation, action, reward):
"""Learn from observations, shared across all models.
However, perturb the reward independently for each model and then update.
"""
t = observation
self.action_hist[t] = self.actions[action]
for m in range(self.M):
m_noise = np.sqrt(self.noise_var) * rd.randn()
self.model_reward_hist[m, t] = reward + m_noise
self._update_model(m, t)
self.lr *= self.lr_decay
def pick_action(self, observation):
"""Select action via ensemble sampling.
Choose active network uniformly at random, then act greedily wrt that model.
"""
m = rd.randint(self.M)
model_out, _ = self._model_forward(m, self.actions)
action = np.argmax(model_out)
return action
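# Minimal smoke-test sketch (not part of the library): a random action set and
# a dummy reward stand in for the bandit environment from env_nn.
if __name__ == '__main__':
  test_actions = rd.randn(10, 4)  # 10 arms, input_dim=4
  agent = TwoLayerNNEpsilonGreedy(
      input_dim=4, hidden_dim=16, actions=test_actions, time_horizon=100,
      prior_var=1.0, noise_var=0.1, epsilon_param=0.1)
  for t in range(5):
    a = agent.pick_action(t)
    agent.update_observation(t, a, float(rd.randn()))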
|
safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | 120 | 12552 | """Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
class RunningMeanStd():
"""Calulates the running mean and std of a data stream.
Attributes:
mean (np.array): mean of data stream.
var (np.array): variance of data stream.
count (float): total count of the data stream.
"""
def __init__(self, epsilon=1e-4, shape=()):
"""Initializes containers for data mean and variance.
Args:
epsilon (float): helps with arithmetic issues.
shape (tuple): the shape of the data stream's output.
"""
self.mean = np.zeros(shape, np.float64)
self.var = np.ones(shape, np.float64)
self.count = epsilon
def update(self, arr):
"""Update current stats with a new stream of data.
Args:
arr (np.array): batch of data with shape (batch_size, *shape).
"""
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
"""Util function for `update` method.
"""
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
class BaseNormalizer(object):
"""Template/default normalizer.
Attributes:
read_only (bool): if to freeze the current stats being tracked.
"""
def __init__(self, read_only=False):
self.read_only = read_only
def set_read_only(self):
self.read_only = True
def unset_read_only(self):
self.read_only = False
def __call__(self, x, *args, **kwargs):
"""Invokes normalization on the given input.
"""
return x
def state_dict(self):
"""Returns snapshot of current stats.
"""
return {}
def load_state_dict(self, _):
"""Restores the stats from a snapshot.
"""
pass
class MeanStdNormalizer(BaseNormalizer):
"""Normalize by the running average.
"""
def __init__(self, shape=(), read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
shape (tuple): shape of data being tracked.
read_only (bool): whether to freeze the tracker.
clip (float): bounds on the data.
epsilon (float): offset to provide divide-by-zero.
"""
super().__init__(read_only)
self.read_only = read_only
self.rms = RunningMeanStd(shape=shape)
self.clip = clip
self.epsilon = epsilon
def __call__(self, x):
"""Update tracker given data, optionally normalize the data.
"""
x = np.asarray(x)
if not self.read_only:
self.rms.update(x)
return np.clip(
(x - self.rms.mean) / np.sqrt(self.rms.var + self.epsilon),
-self.clip, self.clip)
def state_dict(self):
return {'mean': self.rms.mean, 'var': self.rms.var}
def load_state_dict(self, saved):
self.rms.mean = saved['mean']
self.rms.var = saved['var']
class RewardStdNormalizer(MeanStdNormalizer):
"""Reward normalization by running average of returns.
Papers:
* arxiv.org/pdf/1808.04355.pdf
* arxiv.org/pdf/1810.12894.pdf
Also see:
* github.com/openai/baselines/issues/538
"""
def __init__(self, gamma=0.99, read_only=False, clip=10.0, epsilon=1e-8):
"""Initializes the data stream tracker.
Args:
gamma (float): discount factor for rewards.
read_only (bool): whether to freeze the tracker.
clip (float): bounds on the data.
epsilon (float): offset to provide divide-by-zero.
"""
# Reward has default shape (1,) or just ().
super().__init__((), read_only, clip, epsilon)
self.gamma = gamma
self.ret = None
def __call__(self, x, dones):
"""Update tracker given reward, optionally normalize the reward (only scaling).
"""
x = np.asarray(x)
if not self.read_only:
# Track running average of forward discounted returns.
if self.ret is None:
self.ret = np.zeros(x.shape[0])
self.ret = self.ret * self.gamma + x
self.rms.update(self.ret)
# Prevent information leak from previous episodes.
self.ret[dones.astype(bool)] = 0
return np.clip(x / np.sqrt(self.rms.var + self.epsilon), -self.clip, self.clip)
class RescaleNormalizer(BaseNormalizer):
"""Apply constant scaling.
"""
def __init__(self, coef=1.0):
"""Initializes with fixed scaling constant.
Args:
coef (float): scaling coefficient.
"""
super().__init__()
self.coef = coef
def __call__(self, x):
"""Scale the input.
"""
if not isinstance(x, torch.Tensor):
x = np.asarray(x)
return self.coef * x
class ImageNormalizer(RescaleNormalizer):
"""Scale image pixles from [0,255] to [0,1].
"""
def __init__(self):
super().__init__(1.0 / 255)
class ActionUnnormalizer(BaseNormalizer):
"""Assumes policy output action is in [-1,1], unnormalize it for gym env.
"""
def __init__(self, action_space):
"""Defines the mean and std for the bounded action space.
"""
super().__init__()
assert isinstance(action_space, Box), "action space must be gym.spaces.Box"
low, high = action_space.low, action_space.high
self.mean = (low + high) / 2.0
self.std = (high - low) / 2.0
def __call__(self, action):
"""Unnormalizes given input action.
"""
x = np.asarray(action)
return self.mean + x * self.std
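# Minimal sketch (illustrative only): the streaming estimates converge to the
# plain batch statistics when the same data is fed in chunks.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(loc=2.0, scale=3.0, size=(1000, 4))
    rms = RunningMeanStd(shape=(4,))
    for chunk in np.array_split(data, 10):
        rms.update(chunk)
    print(np.allclose(rms.mean, data.mean(axis=0), atol=1e-2))
    print(np.allclose(rms.var, data.var(axis=0), atol=1e-2))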
|
fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | 690 | 12555 | <filename>fastf1/tests/test_livetiming.py
import os
from fastf1.core import Session, Weekend
from fastf1.livetiming.data import LiveTimingData
def test_file_loading_w_errors():
# load file with many errors and invalid data without crashing
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt')
livedata.load()
def test_file_loading():
# load a valid file
livedata = LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt')
livedata.load()
weekend = Weekend(2021, 1)
session = Session(weekend=weekend, session_name='test_session')
session.load_laps(with_telemetry=True, livedata=livedata)
assert session.laps.shape == (274, 26)
assert session.car_data['44'].shape == (17362, 10)
def test_duplicate_removal(tmpdir):
# create a temporary file with two identical lines of data
tmpfile = os.path.join(tmpdir, 'tmpfile.txt')
data = "['TimingAppData', {'Lines': {'22': {'Stints': {'0': {" \
"'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false'," \
"'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':" \
"0}}}}}, '2021-03-27T12:00:32.086Z']\n"
with open(tmpfile, 'w') as fobj:
fobj.write(data)
fobj.write(data)
livedata = LiveTimingData(tmpfile)
assert len(livedata.get('TimingAppData')) == 1
livedata = LiveTimingData(tmpfile, remove_duplicates=False)
assert len(livedata.get('TimingAppData')) == 2
|
perceiver/train/dataset.py | kawa-work/deepmind-research | 10,110 | 12567 | <reponame>kawa-work/deepmind-research
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224 # The number of pixels in the image resize.
class Split(enum.Enum):
"""ImageNet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID, 'VALIDATION': Split.VALID,
'TEST': Split.TEST}[name.upper()]
@property
def num_examples(self):
return {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
Split.VALID: 10000, Split.TEST: 50000}[self]
def load(
split: Split,
*,
is_training: bool,
# batch_dims should be:
# [device_count, per_device_batch_size] or [total_batch_size]
batch_dims: Sequence[int],
augmentation_settings: Mapping[str, Any],
# The shape to which images are resized.
im_dim: int = INPUT_DIM,
threadpool_size: int = 48,
max_intra_op_parallelism: int = 1,
) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
im_size = (im_dim, im_dim)
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
from_=start, to=end, unit='abs')
ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = threadpool_size
options.experimental_threading.max_intra_op_parallelism = (
max_intra_op_parallelism)
options.experimental_optimization.map_parallelization = True
if is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.host_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def crop_augment_preprocess(example):
image, _ = _preprocess_image(
example['image'], is_training, im_size, augmentation_settings)
label = tf.cast(example['label'], tf.int32)
out = {'images': image, 'labels': label}
if is_training:
if augmentation_settings['cutmix']:
out['mask'] = cutmix_padding(*im_size)
out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
if augmentation_settings['mixup_alpha'] is not None:
beta = tfp.distributions.Beta(
augmentation_settings['mixup_alpha'],
augmentation_settings['mixup_alpha'])
out['mixup_ratio'] = beta.sample()
return out
ds = ds.map(crop_augment_preprocess, num_parallel_calls=AUTOTUNE)
# Mixup/cutmix by temporarily batching (using the per-device batch size):
use_cutmix = augmentation_settings['cutmix']
use_mixup = augmentation_settings['mixup_alpha'] is not None
if is_training and (use_cutmix or use_mixup):
inner_batch_size = batch_dims[-1]
# Apply mixup, cutmix, or mixup + cutmix on batched data.
# We use data from 2 batches to produce 1 mixed batch.
ds = ds.batch(inner_batch_size * 2)
if not use_cutmix and use_mixup:
ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
elif use_cutmix and not use_mixup:
ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
elif use_cutmix and use_mixup:
ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
# Unbatch for further processing.
ds = ds.unbatch()
for batch_size in reversed(batch_dims):
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
yield from tfds.as_numpy(ds)
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
"""Returns image mask for CutMix.
Taken from (https://github.com/google/edward2/blob/master/experimental
/marginalization_mixup/data_utils.py#L367)
Args:
h: image height.
w: image width.
"""
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
# Beta dist in paper, but they used Beta(1,1) which is just uniform.
image1_proportion = tf.random.uniform([])
patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
def my_cutmix(batch):
"""Apply CutMix: https://arxiv.org/abs/1905.04899."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
"""Apply mixup: https://arxiv.org/abs/1710.09412."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
ratio = batch['mixup_ratio'][:bs, None, None, None]
images = (ratio * batch['images'][:bs] + (1.0 - ratio) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = ratio[..., 0, 0, 0] # Unsqueeze
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup_cutmix(batch):
"""Apply mixup to half the batch, and cutmix to the other."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 4
mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
mixup_images = (mixup_ratio * batch['images'][:bs]
+ (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
mixup_labels = batch['labels'][:bs]
mixup_mix_labels = batch['labels'][bs:2*bs]
cutmix_mask = batch['mask'][2*bs:3*bs]
cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
+ (1.0 - cutmix_mask) * batch['images'][-bs:])
cutmix_labels = batch['labels'][2*bs:3*bs]
cutmix_mix_labels = batch['labels'][-bs:]
cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
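# Illustrative sketch only (added for exposition, never called by this
# pipeline): a training loop typically consumes the mixed batches produced
# above by interpolating a per-example loss between the two label sets.
# `per_example_loss` is a placeholder for whatever loss the caller uses.
def _example_mixed_loss(per_example_loss, logits, batch):
  loss_a = per_example_loss(logits, batch['labels'])
  loss_b = per_example_loss(logits, batch['mix_labels'])
  return batch['ratio'] * loss_a + (1.0 - batch['ratio']) * loss_b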
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (
Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(
split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
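# Worked example (illustrative numbers): with 50 examples in a split and
# num_shards=4, np.array_split(np.arange(50), 4) yields chunks of length
# 13, 13, 12 and 12, so shard 0 covers [0, 13) and shard 3 covers [38, 50)
# (before the VALID offset is added for the TRAIN split).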
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Returns processed and resized images."""
# Get the image crop.
if is_training:
image, im_shape = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image, im_shape = _decode_and_center_crop(image_bytes)
assert image.dtype == tf.uint8
# Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
if is_training:
if augmentation_settings['randaugment'] is not None:
# Input and output images are dtype uint8.
image = autoaugment.distort_image_with_randaugment(
image,
num_layers=augmentation_settings['randaugment']['num_layers'],
magnitude=augmentation_settings['randaugment']['magnitude'])
# Resize and normalize the image crop.
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(
image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Generates cropped_image using one of the bboxes randomly distorted."""
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
jpeg_shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = [offset_y, offset_x, target_height, target_width]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([target_height, target_width])
return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
image = tf.io.decode_jpeg(image_bytes, channels=3)
im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
return image, im_shape
def _decode_and_random_crop(
image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Make a random crop of INPUT_DIM."""
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image, im_shape = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
# If the random crop failed fall back to center crop.
image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
return image, im_shape
def _center_crop(image, crop_dim):
"""Center crops an image to a target dimension."""
image_height = image.shape[0]
image_width = image.shape[1]
offset_height = ((image_height - crop_dim) + 1) // 2
offset_width = ((image_width - crop_dim) + 1) // 2
return tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_dim, crop_dim)
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
if image_bytes.dtype == tf.dtypes.string:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
else:
jpeg_shape = tf.shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((INPUT_DIM / (INPUT_DIM + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = [offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size]
if image_bytes.dtype == tf.dtypes.string:
image = tf.image.decode_and_crop_jpeg(image_bytes,
tf.stack(crop_window),
channels=3)
else:
image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
return image, im_shape
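# Worked example (illustrative, assuming INPUT_DIM == 224 as defined earlier in
# this module): for a 500x375 JPEG the padded centre crop size is
# int((224 / 256) * 375) = 328, i.e. a 328x328 crop taken from the image centre
# and later resized to the target resolution by _preprocess_image.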
|
networkx/algorithms/approximation/ramsey.py | rakschahsa/networkx | 445 | 12581 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Ramsey numbers.
"""
# Copyright (C) 2011 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
import networkx as nx
from ...utils import arbitrary_element
__all__ = ["ramsey_R2"]
__author__ = """<NAME> (<EMAIL>)"""
def ramsey_R2(G):
r"""Approximately computes the Ramsey number `R(2;s,t)` for graph.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_pair : (set, set) tuple
Maximum clique, Maximum independent set.
"""
if not G:
return set(), set()
node = arbitrary_element(G)
nbrs = nx.all_neighbors(G, node)
nnbrs = nx.non_neighbors(G, node)
c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
c_1.add(node)
i_2.add(node)
# Choose the larger of the two cliques and the larger of the two
# independent sets, according to cardinality.
return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
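# Minimal usage sketch (not part of the original module); the example graph is
# an arbitrary choice:
#
#     >>> import networkx as nx
#     >>> from networkx.algorithms.approximation.ramsey import ramsey_R2
#     >>> clique, indep_set = ramsey_R2(nx.cycle_graph(5))
#     >>> len(clique), len(indep_set)
#     (2, 2)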
|
malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | 111 | 12598 | import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
class Encoder_6(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_6, self).__init__(name='Encoder_6', **kwargs)
self.dim_neck_3 = hparams.dim_neck_3
self.freq_3 = hparams.freq_3
self.dim_f0 = hparams.dim_f0
self.dim_enc_3 = hparams.dim_enc_3
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_1'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc_3
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_1',
)
self.interp = InterpLnr(hparams)
def call(self, x, attention_mask, training=True):
x = self.before_dense_1(x)
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
x = self.interp(
x,
tf.tile([tf.shape(x)[1]], [tf.shape(x)[0]]),
training=training,
)
x = self.encoder_dense_1(x)
return x
class Encoder_7(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_7, self).__init__(name='Encoder_7', **kwargs)
self.config = config
self.dim_neck = hparams.dim_neck
self.dim_neck_3 = hparams.dim_neck_3
self.dim_freq = hparams.dim_freq
self.dim_enc = hparams.dim_enc
self.dim_enc_3 = hparams.dim_enc_3
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc, dtype=tf.float32, name='before_dense_1'
)
self.before_dense_2 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_2'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
config_2 = copy.deepcopy(config)
config_2.hidden_size = self.dim_enc_3
self.layer_2 = [
TFFastSpeechLayer(config_2, name='layer_._{}'.format(i))
for i in range(config_2.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck, dtype=tf.float32, name='encoder_dense_1'
)
self.encoder_dense_2 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_2',
)
self.interp = InterpLnr(hparams)
def call(self, x_f0, attention_mask, training=True):
x = x_f0[:, :, : self.dim_freq]
f0 = x_f0[:, :, self.dim_freq:]
x = self.before_dense_1(x)
f0 = self.before_dense_2(f0)
seq_length = tf.shape(x_f0)[1]
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
f0 = self.layer_2[no]([f0, attention_mask], training=training)[0]
x_f0 = tf.concat((x, f0), axis=2)
x_f0 = self.interp(
x_f0,
tf.tile([tf.shape(x_f0)[1]], [tf.shape(x)[0]]),
training=training,
)
        x = x_f0[:, :, : self.dim_enc]
        f0 = x_f0[:, :, self.dim_enc:]
x = self.encoder_dense_1(x)
f0 = self.encoder_dense_2(f0)
return x, f0
class Encoder_t(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_t, self).__init__(name='Encoder_t', **kwargs)
self.dim_neck_2 = hparams.dim_neck_2
self.freq_2 = hparams.freq_2
self.dim_freq = hparams.dim_freq
self.dim_enc_2 = hparams.dim_enc_2
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
config = copy.deepcopy(config)
config.num_hidden_layers = 1
config.hidden_size = self.dim_enc_2
self.config = config
self.before_dense = tf.keras.layers.Dense(
units=self.dim_enc_2, dtype=tf.float32, name='before_dense_1'
)
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.encoder_dense = tf.keras.layers.Dense(
units=self.dim_neck_2, dtype=tf.float32, name='encoder_dense'
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.encoder_dense(f)
class Decoder_3(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_3, self).__init__(name='Decoder_3', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_freq,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Decoder_4(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_4, self).__init__(name='Decoder_4', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_f0,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Model(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model, self).__init__(name='speechsplit', **kwargs)
self.encoder_1 = Encoder_7(
config.encoder_self_attention_params, hparams
)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_3(config.decoder_self_attention_params, hparams)
self.freq = hparams.freq
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_f0, x_org, c_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_x, codes_f0 = self.encoder_1(
x_f0, attention_mask, training=training
)
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_1 = codes_x
code_exp_3 = codes_f0
code_exp_2 = codes_2
c_trg = tf.tile(tf.expand_dims(c_trg, 1), (1, tf.shape(x_f0)[1], 1))
encoder_outputs = tf.concat(
(code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1
)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_x, codes_f0, codes_2, encoder_outputs, mel_outputs
class Model_F0(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model_F0, self).__init__(name='speechsplit_f0', **kwargs)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.encoder_3 = Encoder_6(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_4(config.decoder_self_attention_params, hparams)
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_org, f0_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_2 = codes_2
codes_3 = self.encoder_3(f0_trg, attention_mask, training=training)
code_exp_3 = codes_3
self.o = [code_exp_2, code_exp_3]
encoder_outputs = tf.concat((code_exp_2, code_exp_3), axis=-1)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_2, codes_3, encoder_outputs, mel_outputs
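# Illustrative note (not from the original code): both models build their
# padding mask from `mel_lengths` with tf.sequence_mask, e.g.
#
#     lengths = tf.constant([3, 5])
#     tf.sequence_mask(lengths, maxlen=tf.reduce_max(lengths), dtype=tf.float32)
#     # -> [[1., 1., 1., 0., 0.],
#     #     [1., 1., 1., 1., 1.]]
#
# so positions beyond each utterance's mel length are ignored by the encoders.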
|
apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py | tharindu1st/apim-migration-resources | 1,573 | 12615 | <filename>apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/pymysql/constants/CR.py
# flake8: noqa
# errmsg.h
CR_ERROR_FIRST = 2000
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_SECURE_AUTH = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_ERROR_LAST = 2061
|
sayn/logging/file_logger.py | robin-173/sayn | 105 | 12651 | from pathlib import Path
import logging
from .logger import Logger
from .log_formatter import LogFormatter
class FileLogger(Logger):
fmt = LogFormatter(use_colour=False, output_ts=False)
logger = None
def __init__(self, folder, format=None):
if format is None:
format = ("%(asctime)s|%(levelname)s|%(message)s",)
formatter = logging.Formatter(format)
log_file = Path(folder, "sayn.log")
if not log_file.parent.exists():
log_file.parent.mkdir(parents=True)
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self.logger = logger
def print(self, s=None):
if s is not None:
if s["level"] == "info":
func = self.logger.info
elif s["level"] == "error":
func = self.logger.error
elif s["level"] == "warning":
func = self.logger.warning
else:
func = self.logger.debug
s = s["message"]
if isinstance(s, str):
s = [s]
elif not isinstance(s, list):
raise ValueError("error in logging print")
func(f"{s[0]}")
for e in s[1:]:
for l in e.split("\n"):
func(f"{l}")
|
segmentation/utils/transforms.py | voldemortX/DST-CBC | 103 | 12655 | <gh_stars>100-1000
# Mostly copied and modified from torch/vision/references/segmentation to support unlabeled data
# Copied functions from fmassa/vision-1 to support multi-dimensional masks loaded from numpy ndarray
import numpy as np
from PIL import Image
import random
import torch
import utils.functional as F
# For 2/3 dimensional tensors only
def get_tensor_image_size(img):
if img.dim() == 2:
h, w = img.size()
else:
h = img.size()[1]
w = img.size()[2]
return h, w
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target, *args):
for t in self.transforms:
image, target = t(image, target)
return (image, target, *args)
class Resize(object):
def __init__(self, size_image, size_label):
self.size_image = size_image
self.size_label = size_label
def __call__(self, image, target):
image = image if type(image) == str else F.resize(image, self.size_image, interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, self.size_label, interpolation=Image.NEAREST)
return image, target
# Pad the image with zeros, but pad the target with 255 (ignore label), on the
# bottom & right when a bigger desired size is given (otherwise nothing is done)
class ZeroPad(object):
def __init__(self, size):
self.h, self.w = size
@staticmethod
def zero_pad(image, target, h, w):
oh, ow = get_tensor_image_size(image)
pad_h = h - oh if oh < h else 0
pad_w = w - ow if ow < w else 0
image = F.pad(image, (0, 0, pad_w, pad_h), fill=0)
target = target if type(target) == str else F.pad(target, (0, 0, pad_w, pad_h), fill=255)
return image, target
def __call__(self, image, target):
return self.zero_pad(image, target, self.h, self.w)
class RandomResize(object):
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
min_h, min_w = self.min_size
max_h, max_w = self.max_size
h = random.randint(min_h, max_h)
w = random.randint(min_w, max_w)
image = F.resize(image, (h, w), interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, (h, w), interpolation=Image.NEAREST)
return image, target
class RandomScale(object):
def __init__(self, min_scale, max_scale=None):
self.min_scale = min_scale
if max_scale is None:
max_scale = min_scale
self.max_scale = max_scale
def __call__(self, image, target):
scale = random.uniform(self.min_scale, self.max_scale)
h, w = get_tensor_image_size(image)
h = int(scale * h)
w = int(scale * w)
image = F.resize(image, (h, w), interpolation=Image.LINEAR)
target = target if type(target) == str else F.resize(target, (h, w), interpolation=Image.NEAREST)
return image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
@staticmethod
def get_params(img, output_size):
h, w = get_tensor_image_size(img)
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, image, target):
# Pad if needed
ih, iw = get_tensor_image_size(image)
if ih < self.size[0] or iw < self.size[1]:
image, target = ZeroPad.zero_pad(image, target,
max(self.size[0], ih),
max(self.size[1], iw))
i, j, h, w = self.get_params(image, self.size)
image = F.crop(image, i, j, h, w)
target = target if type(target) == str else F.crop(target, i, j, h, w)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
t = random.random()
if t < self.flip_prob:
image = F.hflip(image)
target = target if (type(target) == str or t >= self.flip_prob) else F.hflip(target)
return image, target
class ToTensor(object):
def __init__(self, keep_scale=False, reverse_channels=False):
# keep_scale = True => Images or whatever are not divided by 255
# reverse_channels = True => RGB images are changed to BGR(the default behavior of openCV & Caffe,
# let's wish them all go to heaven,
# for they wasted me days!)
self.keep_scale = keep_scale
self.reverse_channels = reverse_channels
def __call__(self, image, target):
image = image if type(image) == str else self._pil_to_tensor(image)
target = target if type(target) == str else self.label_to_tensor(target)
return image, target
@staticmethod
def label_to_tensor(pic): # 3 dimensional arrays or normal segmentation masks
if isinstance(pic, np.ndarray):
return torch.as_tensor(pic.transpose((2, 0, 1)), dtype=torch.float32)
else:
return torch.as_tensor(np.asarray(pic).copy(), dtype=torch.int64)
def _pil_to_tensor(self, pic):
# Convert a PIL Image to tensor(a direct copy)
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == 'F':
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == '1':
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
        if self.reverse_channels:  # Beware this only works with 3 channels (can't use -1 with tensors)
img = img[:, :, [2, 1, 0]]
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
if self.keep_scale:
return img.float()
else:
return img.float().div(255)
else:
return img
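# Worked example (illustrative, not from the original code): with the defaults a
# uint8 RGB PIL image becomes a float32 CHW tensor scaled to [0, 1], e.g.
#
#     img, _ = ToTensor()(Image.new('RGB', (4, 2), (255, 0, 0)), 'no_label')
#     # img.shape == torch.Size([3, 2, 4]) and img.max() == 1.0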
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
# Init with a python list as the map(mainly for cityscapes's id -> train_id)
class LabelMap(object):
def __init__(self, label_id_map):
self.label_id_map = torch.tensor(label_id_map)
def __call__(self, image, target):
target = target if type(target) == str else self.label_id_map[target]
return image, target
|
src/c3nav/site/templatetags/route_render.py | johnjohndoe/c3nav | 132 | 12659 | <reponame>johnjohndoe/c3nav
from django import template
register = template.Library()
@register.filter
def negate(value):
return -value
@register.filter
def subtract(value, arg):
return value - arg
|
mysql_tests/test_schema.py | maestro-1/gino | 1,376 | 12678 | from enum import Enum
import pytest
import gino
from gino.dialects.aiomysql import AsyncEnum
pytestmark = pytest.mark.asyncio
db = gino.Gino()
class MyEnum(Enum):
ONE = "one"
TWO = "two"
class Blog(db.Model):
__tablename__ = "s_blog"
id = db.Column(db.BigInteger(), primary_key=True)
title = db.Column(db.Unicode(255), index=True, comment="Title Comment")
visits = db.Column(db.BigInteger(), default=0)
comment_id = db.Column(db.ForeignKey("s_comment.id"))
number = db.Column(db.Enum(MyEnum), nullable=False, default=MyEnum.TWO)
number2 = db.Column(AsyncEnum(MyEnum), nullable=False, default=MyEnum.TWO)
class Comment(db.Model):
__tablename__ = "s_comment"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id", name="blog_id_fk"))
blog_seq = db.Sequence("blog_seq", metadata=db, schema="schema_test")
async def test(engine, define=True):
async with engine.acquire() as conn:
assert not await engine.dialect.has_table(conn, "non_exist")
Blog.__table__.comment = "Blog Comment"
db.bind = engine
await db.gino.create_all()
await Blog.number.type.create_async(engine, checkfirst=True)
await Blog.number2.type.create_async(engine, checkfirst=True)
await db.gino.create_all(tables=[Blog.__table__], checkfirst=True)
await blog_seq.gino.create(checkfirst=True)
await Blog.__table__.gino.create(checkfirst=True)
await db.gino.drop_all()
await db.gino.drop_all(tables=[Blog.__table__], checkfirst=True)
await Blog.__table__.gino.drop(checkfirst=True)
await blog_seq.gino.drop(checkfirst=True)
if define:
class Comment2(db.Model):
__tablename__ = "s_comment_2"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id"))
await db.gino.create_all()
await db.gino.drop_all()
|