code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3 - 1.05M | stringlengths 5 - 104 | stringlengths 4 - 251 | stringclasses 1 value | stringclasses 15 values | int64 3 - 1.05M
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ['WampLongPollResource']
import json
import traceback
import binascii
from collections import deque
from twisted.python import log
from twisted.web.resource import Resource, NoResource
## Each of the following two imports triggers a reactor import at module level
from twisted.web import http
from twisted.web.server import NOT_DONE_YET
from autobahn.util import newid
from autobahn.wamp.websocket import parseSubprotocolIdentifier
from autobahn.wamp.exception import ProtocolError, \
SerializationError, \
TransportLost
class WampLongPollResourceSessionSend(Resource):
"""
A Web resource for sending via XHR that is part of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
def __init__(self, parent):
"""
Ctor.
:param parent: The Web parent resource for the WAMP session.
:type parent: Instance of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
Resource.__init__(self)
self._parent = parent
self._debug = self._parent._parent._debug
def render_POST(self, request):
"""
A client sends a message via WAMP-over-Longpoll by HTTP/POSTing
to this Web resource. The body of the POST should contain a batch
of WAMP messages which are serialized according to the selected
serializer, and delimited by a single \0 byte in between two WAMP
messages in the batch.
"""
payload = request.content.read()
if self._debug:
log.msg("WampLongPoll: receiving data for transport '{0}'\n{1}".format(self._parent._transportid, binascii.hexlify(payload)))
try:
## process (batch of) WAMP message(s)
self._parent.onMessage(payload, None)
except Exception as e:
return self._parent._parent._failRequest(request, "could not unserialize WAMP message: {0}".format(e))
else:
request.setResponseCode(http.NO_CONTENT)
self._parent._parent._setStandardHeaders(request)
self._parent._isalive = True
return ""
class WampLongPollResourceSessionReceive(Resource):
"""
A Web resource for receiving via XHR that is part of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
def __init__(self, parent):
"""
Ctor.
:param parent: The Web parent resource for the WAMP session.
:type parent: Instance of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
Resource.__init__(self)
self._parent = parent
self._debug = self._parent._parent._debug
self.reactor = self._parent._parent.reactor
self._queue = deque()
self._request = None
self._killed = False
if self._debug:
def logqueue():
if not self._killed:
log.msg("WampLongPoll: transport '{0}' - currently polled {1}, pending messages {2}".format(self._parent._transportid, self._request is not None, len(self._queue)))
self.reactor.callLater(1, logqueue)
logqueue()
def queue(self, data):
"""
Enqueue data to be received by client.
:param data: The data to be received by the client.
:type data: bytes
"""
self._queue.append(data)
self._trigger()
def _kill(self):
"""
Kill any outstanding request.
"""
if self._request:
self._request.finish()
self._request = None
self._killed = True
def _trigger(self):
"""
Trigger batched sending of queued messages.
"""
if self._request and len(self._queue):
if self._parent._serializer._serializer._batched:
## in batched mode, write all pending messages
while len(self._queue) > 0:
msg = self._queue.popleft()
self._request.write(msg)
else:
## in unbatched mode, only write 1 pending message
msg = self._queue.popleft()
self._request.write(msg)
self._request.finish()
self._request = None
def render_POST(self, request):
"""
A client receives WAMP messages by issuing a HTTP/POST to this
Web resource. The request will immediately return when there are
messages pending to be received. When there are no such messages
pending, the request will "just hang", until either a message
arrives to be received or a timeout occurs.
"""
## remember request, which marks the session as being polled
self._request = request
self._parent._parent._setStandardHeaders(request)
request.setHeader('content-type', self._parent._serializer.MIME_TYPE)
def cancel(err):
if self._debug:
log.msg("WampLongPoll: poll request for transport '{0}' has gone away".format(self._parent._transportid))
self._request = None
request.notifyFinish().addErrback(cancel)
self._parent._isalive = True
self._trigger()
return NOT_DONE_YET
class WampLongPollResourceSessionClose(Resource):
"""
A Web resource for closing the long-poll session, part of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
def __init__(self, parent):
"""
Ctor.
:param parent: The Web parent resource for the WAMP session.
:type parent: Instance of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
"""
Resource.__init__(self)
self._parent = parent
self._debug = self._parent._parent._debug
def render_POST(self, request):
"""
A client may actively close a session (and the underlying long-poll transport)
by issuing a HTTP/POST with empty body to this resource.
"""
if self._debug:
log.msg("WampLongPoll: closing transport '{0}'".format(self._parent._transportid))
## now actually close the session
self._parent.close()
if self._debug:
log.msg("WampLongPoll: session ended and transport {0} closed".format(self._parent._transportid))
request.setResponseCode(http.NO_CONTENT)
self._parent._parent._setStandardHeaders(request)
return ""
class WampLongPollResourceSession(Resource):
"""
A Web resource representing an open WAMP session.
"""
def __init__(self, parent, transportid, serializer):
"""
Create a new Web resource representing a WAMP session.
:param parent: The WAMP Web base resource.
:type parent: Instance of WampLongPollResource.
:param transportid: The transport ID for this session.
:type transportid: str
:param serializer: The WAMP serializer in use for this session.
:type serializer: An instance of WampSerializer.
"""
Resource.__init__(self)
self._parent = parent
self._debug = self._parent._debug
self._debug_wamp = True
self.reactor = self._parent.reactor
self._transportid = transportid
self._serializer = serializer
self._session = None
## session authentication information
##
self._authid = None
self._authrole = None
self._authmethod = None
self._send = WampLongPollResourceSessionSend(self)
self._receive = WampLongPollResourceSessionReceive(self)
self._close = WampLongPollResourceSessionClose(self)
self.putChild("send", self._send)
self.putChild("receive", self._receive)
self.putChild("close", self._close)
killAfter = self._parent._killAfter
self._isalive = False
def killIfDead():
if not self._isalive:
if self._debug:
log.msg("WampLongPoll: killing inactive WAMP session with transport '{0}'".format(self._transportid))
self.onClose(False, 5000, "session inactive")
self._receive._kill()
del self._parent._transports[self._transportid]
else:
if self._debug:
log.msg("WampLongPoll: transport '{0}'' is still alive".format(self._transportid))
self._isalive = False
self.reactor.callLater(killAfter, killIfDead)
self.reactor.callLater(killAfter, killIfDead)
if self._debug:
log.msg("WampLongPoll: session resource for transport '{0}' initialized)".format(self._transportid))
self.onOpen()
def close(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.close`
"""
if self.isOpen():
self.onClose(True, 1000, "session closed")
self._receive._kill()
del self._parent._transports[self._transportid]
else:
raise TransportLost()
def abort(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
"""
if self.isOpen():
self.onClose(True, 1000, "session aborted")
self._receive._kill()
del self._parent._transports[self._transportid]
else:
raise TransportLost()
def onClose(self, wasClean, code, reason):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
"""
if self._session:
try:
self._session.onClose(wasClean)
except Exception:
## silently ignore exceptions raised here ..
if self._debug:
traceback.print_exc()
self._session = None
def onOpen(self):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onOpen`
"""
try:
self._session = self._parent._factory()
self._session.onOpen(self)
except Exception:
if self._debug:
traceback.print_exc()
def onMessage(self, payload, isBinary):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`
"""
for msg in self._serializer.unserialize(payload, isBinary):
if self._debug:
print("WampLongPoll: RX {0}".format(msg))
self._session.onMessage(msg)
def send(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.send`
"""
if self.isOpen():
try:
if self._debug:
print("WampLongPoll: TX {0}".format(msg))
bytes, isBinary = self._serializer.serialize(msg)
except Exception as e:
## all exceptions raised from above should be serialization errors ..
raise SerializationError("unable to serialize WAMP application payload ({0})".format(e))
else:
self._receive.queue(bytes)
else:
raise TransportLost()
def isOpen(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
"""
return self._session is not None
class WampLongPollResourceOpen(Resource):
"""
A Web resource for creating new WAMP sessions.
"""
def __init__(self, parent):
"""
Ctor.
:param parent: The parent Web resource.
:type parent: Instance of `WampLongPollResource`.
"""
Resource.__init__(self)
self._parent = parent
self._debug = self._parent._debug
def render_POST(self, request):
"""
Request to create a new WAMP session.
"""
if self._debug:
log.msg("WampLongPoll: creating new session ..")
payload = request.content.read()
try:
options = json.loads(payload)
except Exception as e:
return self._parent._failRequest(request, "could not parse WAMP session open request body: {0}".format(e))
if type(options) != dict:
return self._parent._failRequest(request, "invalid type for WAMP session open request [was {0}, expected dictionary]".format(type(options)))
if not 'protocols' in options:
return self._parent._failRequest(request, "missing attribute 'protocols' in WAMP session open request")
## determine the protocol to speak
##
protocol = None
serializer = None
for p in options['protocols']:
version, serializerId = parseSubprotocolIdentifier(p)
if version == 2 and serializerId in self._parent._serializers.keys():
serializer = self._parent._serializers[serializerId]
protocol = p
break
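## For example (illustrative values): a client might POST
##   {"protocols": ["wamp.2.json.batched", "wamp.2.json"]}
## and the first listed protocol whose serializer is available on this
## server is selected.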
if protocol is None:
return self._parent._failRequest(request, "no common protocol to speak (I speak: {0})".format(["wamp.2.{}".format(s) for s in self._parent._serializers.keys()]))
## make up new transport ID
##
if self._parent._debug_transport_id:
## use fixed transport ID for debugging purposes
transport = self._parent._debug_transport_id
else:
transport = newid()
## create instance of WampLongPollResourceSession or subclass thereof ..
##
self._parent._transports[transport] = self._parent.protocol(self._parent, transport, serializer)
## create response
##
self._parent._setStandardHeaders(request)
request.setHeader('content-type', 'application/json; charset=utf-8')
result = {
'transport': transport,
'protocol': protocol
}
bytes = json.dumps(result)
if self._debug:
log.msg("WampLongPoll: new session created on transport '{0}'".format(transport))
return bytes
class WampLongPollResource(Resource):
"""
A WAMP-over-Longpoll resource for use with Twisted Web Resource trees.
This class provides an implementation of the
`WAMP-over-Longpoll Transport <https://github.com/tavendo/WAMP/blob/master/spec/advanced.md#long-poll-transport>`_
for WAMP.
The Resource exposes the following paths (child resources).
Opening a new WAMP session:
* ``<base-url>/open``
Once a transport is created and the session is opened:
* ``<base-url>/<transport-id>/send``
* ``<base-url>/<transport-id>/receive``
* ``<base-url>/<transport-id>/close``
"""
protocol = WampLongPollResourceSession
def __init__(self,
factory,
serializers = None,
timeout = 10,
killAfter = 30,
queueLimitBytes = 128 * 1024,
queueLimitMessages = 100,
debug = False,
debug_transport_id = None,
reactor = None):
"""
Create new HTTP WAMP Web resource.
:param factory: A (router) session factory.
:type factory: Instance of `RouterSessionFactory`.
:param serializers: List of WAMP serializers.
:type serializers: List of WampSerializer objects.
:param timeout: XHR polling timeout in seconds.
:type timeout: int
:param killAfter: Kill WAMP session after inactivity in seconds.
:type killAfter: int
:param queueLimitBytes: Kill WAMP session after accumulation of this many bytes in send queue (XHR poll).
:type queueLimitBytes: int
:param queueLimitMessages: Kill WAMP session after accumulation of this many messages in send queue (XHR poll).
:type queueLimitMessages: int
:param debug: Enable debug logging.
:type debug: bool
:param debug_transport_id: If given, use this fixed transport ID.
:type debug_transport_id: str
:param reactor: The Twisted reactor to run under.
:type reactor: obj
"""
Resource.__init__(self)
## RouterSessionFactory
self._factory = factory
## lazy import to avoid reactor install upon module import
if reactor is None:
from twisted.internet import reactor
self.reactor = reactor
self._debug = debug
self._debug_transport_id = debug_transport_id
self._timeout = timeout
self._killAfter = killAfter
self._queueLimitBytes = queueLimitBytes
self._queueLimitMessages = queueLimitMessages
if serializers is None:
serializers = []
## try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
serializers.append(MsgPackSerializer(batched = True))
serializers.append(MsgPackSerializer())
except ImportError:
pass
## try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
serializers.append(JsonSerializer(batched = True))
serializers.append(JsonSerializer())
except ImportError:
pass
if not serializers:
raise Exception("could not import any WAMP serializers")
self._serializers = {}
for ser in serializers:
self._serializers[ser.SERIALIZER_ID] = ser
self._transports = {}
## <Base URL>/open
##
self.putChild("open", WampLongPollResourceOpen(self))
if self._debug:
log.msg("WampLongPollResource initialized")
def render_GET(self, request):
request.setHeader('content-type', 'text/html; charset=UTF-8')
peer = "{}:{}".format(request.client.host, request.client.port)
return self.getNotice(peer = peer)
def getChild(self, name, request):
"""
Returns send/receive/close resource for transport.
.. seealso::
* :class:`twisted.web.resource.Resource`
"""
if name not in self._transports:
return NoResource("no WAMP transport '{}'".format(name))
if len(request.postpath) != 1 or request.postpath[0] not in ['send', 'receive', 'close']:
return NoResource("invalid WAMP transport operation '{}'".format(request.postpath))
return self._transports[name]
def _setStandardHeaders(self, request):
"""
Set standard HTTP response headers.
"""
origin = request.getHeader("origin")
if origin is None or origin == "null":
origin = "*"
request.setHeader('access-control-allow-origin', origin)
request.setHeader('access-control-allow-credentials', 'true')
request.setHeader('cache-control', 'no-store, no-cache, must-revalidate, max-age=0')
headers = request.getHeader('access-control-request-headers')
if headers is not None:
request.setHeader('access-control-allow-headers', headers)
def _failRequest(self, request, msg):
"""
Fails a request to the long-poll service.
"""
self._setStandardHeaders(request)
request.setHeader('content-type', 'text/plain; charset=UTF-8')
request.setResponseCode(http.BAD_REQUEST)
return msg
def getNotice(self, peer, redirectUrl = None, redirectAfter = 0):
"""
Render a user notice (HTML page) when the Long-Poll root resource
is accessed via HTTP/GET (by a user).
:param peer: Descriptor of the connecting peer, e.g. "<host>:<port>".
:type peer: str
:param redirectUrl: Optional URL to redirect the user to.
:type redirectUrl: str
:param redirectAfter: When `redirectUrl` is provided, redirect after this time (seconds).
:type redirectAfter: int
"""
from autobahn import __version__
if redirectUrl:
redirect = """<meta http-equiv="refresh" content="%d;URL='%s'">""" % (redirectAfter, redirectUrl)
else:
redirect = ""
html = """
<!DOCTYPE html>
<html>
<head>
%s
<style>
body {
color: #fff;
background-color: #027eae;
font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
}
a, a:visited, a:hover {
color: #fff;
}
</style>
</head>
<body>
<h1>AutobahnPython %s</h1>
<p>
I am not a Web server, but a <b>WAMP-over-LongPoll</b> transport endpoint.
</p>
<p>
You can talk to me using the <a href="https://github.com/tavendo/WAMP/blob/master/spec/advanced.md#long-poll-transport">WAMP-over-LongPoll</a> protocol.
</p>
<p>
For more information, please see:
<ul>
<li><a href="http://wamp.ws/">WAMP</a></li>
<li><a href="http://autobahn.ws/python">AutobahnPython</a></li>
</ul>
</p>
</body>
</html>
""" % (redirect, __version__)
return html
| hlzz/dotfiles | graphics/VTK-7.0.0/ThirdParty/AutobahnPython/autobahn/twisted/longpoll.py | Python | bsd-3-clause | 21,583 |
# paperScrape.py v0.00 damiancclarke yyyy-mm-dd:2015-05-29
#---|----1----|----2----|----3----|----4----|----5----|----6----|----7----|----8
#
# Scrapes econpapers for all titles, abstracts, etc from all JPE issues. Based
# on the url http://econpapers.repec.org/article/ucpjpolec/, and screen dumps of
# each issue.
#
import os
import urllib
import urllib2
from urllib2 import urlopen, URLError, HTTPError
import re
#-------------------------------------------------------------------------------
#--- (1) out
#-------------------------------------------------------------------------------
nameFile = open('namesJPE.txt', 'w')
absFile = open('abstractsJPE.txt', 'w')
#-------------------------------------------------------------------------------
#--- (2) dump
#-------------------------------------------------------------------------------
base = 'http://econpapers.repec.org/article/ucpjpolec/'
addresses = ['http://econpapers.repec.org/article/ucpjpolec/default.htm']
for page in range(1,74):
addresses += [base+'default'+str(page)+'.htm']
for a in addresses:
source = urllib2.urlopen(a).read()
papers = re.findall('<dt><a href="(.*)</a>', source)
for p in papers:
p = p.split('.htm')
padd = base+'/'+p[0]+'.htm'
det = urllib2.urlopen(padd).read()
name = re.search('<meta name="citation_title" content="(.*)">',det)
abstract = re.search('<meta name="citation_abstract" content="(.*)">',det)
year = re.search('<meta name="citation_year" content="(.*)">',det)
volume = re.search('<meta name="citation_volume" content="(.*)">',det)
try:
abstract = abstract.group(1)
except AttributeError:
abstract = ''
name = name.group(1)
volume = volume.group(1)
year = year.group(1)
nameFile.write(year + ' | ' + volume + ' | ' + name +'\n')
absFile.write(year + ' | ' + volume + ' | ' + abstract +'\n')
nameFile.close()
absFile.close()
| damiancclarke/PhD | literature/scrape/paperScrape.py | Python | cc0-1.0 | 2,022 |
import copy
import logging
import random
import string
import salt.config
import salt.loader
import salt.modules.boto_elasticsearch_domain as boto_elasticsearch_domain
from salt.utils.versions import LooseVersion
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
# pylint: disable=import-error,no-name-in-module
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module
# the boto_elasticsearch_domain module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = "1.2.1"
def _has_required_boto():
"""
Returns True/False boolean depending on if Boto is installed and correct
version.
"""
if not HAS_BOTO:
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
else:
return True
if _has_required_boto():
region = "us-east-1"
access_key = "GKTADJGHEIQSXMKKRBJ08H"
secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
conn_parameters = {
"region": region,
"key": access_key,
"keyid": secret_key,
"profile": {},
}
error_message = (
"An error occurred (101) when calling the {0} operation: Test-defined error"
)
error_content = {"Error": {"Code": 101, "Message": "Test-defined error"}}
not_found_error = ClientError(
{
"Error": {
"Code": "ResourceNotFoundException",
"Message": "Test-defined error",
}
},
"msg",
)
domain_ret = dict(
DomainName="testdomain",
ElasticsearchClusterConfig={},
EBSOptions={},
AccessPolicies={},
SnapshotOptions={},
AdvancedOptions={},
)
log = logging.getLogger(__name__)
class BotoElasticsearchDomainTestCaseBase(TestCase, LoaderModuleMockMixin):
conn = None
def setup_loader_modules(self):
self.opts = salt.config.DEFAULT_MINION_OPTS.copy()
utils = salt.loader.utils(
self.opts,
whitelist=["boto3", "args", "systemd", "path", "platform"],
context={},
)
return {boto_elasticsearch_domain: {"__utils__": utils}}
def setUp(self):
super().setUp()
boto_elasticsearch_domain.__init__(self.opts)
del self.opts
# Set up MagicMock to replace the boto3 session
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters["key"] = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(50)
)
self.patcher = patch("boto3.session.Session")
self.addCleanup(self.patcher.stop)
self.addCleanup(delattr, self, "patcher")
mock_session = self.patcher.start()
session_instance = mock_session.return_value
self.conn = MagicMock()
self.addCleanup(delattr, self, "conn")
session_instance.client.return_value = self.conn
class BotoElasticsearchDomainTestCaseMixin:
pass
# @skipIf(True, "Skip these tests while investigating failures")
@skipIf(HAS_BOTO is False, "The boto module must be installed.")
@skipIf(
_has_required_boto() is False,
"The boto3 module must be greater than or equal to version {}".format(
required_boto3_version
),
)
class BotoElasticsearchDomainTestCase(
BotoElasticsearchDomainTestCaseBase, BotoElasticsearchDomainTestCaseMixin
):
"""
TestCase for salt.modules.boto_elasticsearch_domain module
"""
def test_that_when_checking_if_a_domain_exists_and_a_domain_exists_the_domain_exists_method_returns_true(
self,
):
"""
Tests checking domain existence when the domain already exists
"""
result = boto_elasticsearch_domain.exists(
DomainName="testdomain", **conn_parameters
)
self.assertTrue(result["exists"])
def test_that_when_checking_if_a_domain_exists_and_a_domain_does_not_exist_the_domain_exists_method_returns_false(
self,
):
"""
Tests checking domain existence when the domain does not exist
"""
self.conn.describe_elasticsearch_domain.side_effect = not_found_error
result = boto_elasticsearch_domain.exists(
DomainName="mydomain", **conn_parameters
)
self.assertFalse(result["exists"])
def test_that_when_checking_if_a_domain_exists_and_boto3_returns_an_error_the_domain_exists_method_returns_error(
self,
):
"""
Tests checking domain existence when boto returns an error
"""
self.conn.describe_elasticsearch_domain.side_effect = ClientError(
error_content, "list_domains"
)
result = boto_elasticsearch_domain.exists(
DomainName="mydomain", **conn_parameters
)
self.assertEqual(
result.get("error", {}).get("message"), error_message.format("list_domains")
)
def test_that_when_checking_domain_status_and_a_domain_exists_the_domain_status_method_returns_info(
self,
):
"""
Tests checking domain existence when the domain already exists
"""
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.status(
DomainName="testdomain", **conn_parameters
)
self.assertTrue(result["domain"])
def test_that_when_checking_domain_status_and_boto3_returns_an_error_the_domain_status_method_returns_error(
self,
):
"""
Tests checking domain existence when boto returns an error
"""
self.conn.describe_elasticsearch_domain.side_effect = ClientError(
error_content, "list_domains"
)
result = boto_elasticsearch_domain.status(
DomainName="mydomain", **conn_parameters
)
self.assertEqual(
result.get("error", {}).get("message"), error_message.format("list_domains")
)
def test_that_when_describing_domain_it_returns_the_dict_of_properties_returns_true(
self,
):
"""
Tests describing parameters if domain exists
"""
domainconfig = {}
for k, v in domain_ret.items():
if k == "DomainName":
continue
domainconfig[k] = {"Options": v}
self.conn.describe_elasticsearch_domain_config.return_value = {
"DomainConfig": domainconfig
}
result = boto_elasticsearch_domain.describe(
DomainName=domain_ret["DomainName"], **conn_parameters
)
log.warning(result)
desired_ret = copy.copy(domain_ret)
desired_ret.pop("DomainName")
self.assertEqual(result, {"domain": desired_ret})
def test_that_when_describing_domain_on_client_error_it_returns_error(self):
"""
Tests describing parameters failure
"""
self.conn.describe_elasticsearch_domain_config.side_effect = ClientError(
error_content, "list_domains"
)
result = boto_elasticsearch_domain.describe(
DomainName="testdomain", **conn_parameters
)
self.assertTrue("error" in result)
def test_that_when_creating_a_domain_succeeds_the_create_domain_method_returns_true(
self,
):
"""
tests True domain created.
"""
self.conn.create_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.create(**args)
self.assertTrue(result["created"])
def test_that_when_creating_a_domain_fails_the_create_domain_method_returns_error(
self,
):
"""
tests False domain not created.
"""
self.conn.create_elasticsearch_domain.side_effect = ClientError(
error_content, "create_domain"
)
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.create(**args)
self.assertEqual(
result.get("error", {}).get("message"),
error_message.format("create_domain"),
)
def test_that_when_deleting_a_domain_succeeds_the_delete_domain_method_returns_true(
self,
):
"""
tests True domain deleted.
"""
result = boto_elasticsearch_domain.delete(
DomainName="testdomain", **conn_parameters
)
self.assertTrue(result["deleted"])
def test_that_when_deleting_a_domain_fails_the_delete_domain_method_returns_false(
self,
):
"""
tests False domain not deleted.
"""
self.conn.delete_elasticsearch_domain.side_effect = ClientError(
error_content, "delete_domain"
)
result = boto_elasticsearch_domain.delete(
DomainName="testdomain", **conn_parameters
)
self.assertFalse(result["deleted"])
def test_that_when_updating_a_domain_succeeds_the_update_domain_method_returns_true(
self,
):
"""
tests True domain updated.
"""
self.conn.update_elasticsearch_domain_config.return_value = {
"DomainConfig": domain_ret
}
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.update(**args)
self.assertTrue(result["updated"])
def test_that_when_updating_a_domain_fails_the_update_domain_method_returns_error(
self,
):
"""
tests False domain not updated.
"""
self.conn.update_elasticsearch_domain_config.side_effect = ClientError(
error_content, "update_domain"
)
args = copy.copy(domain_ret)
args.update(conn_parameters)
result = boto_elasticsearch_domain.update(**args)
self.assertEqual(
result.get("error", {}).get("message"),
error_message.format("update_domain"),
)
def test_that_when_adding_tags_succeeds_the_add_tags_method_returns_true(self):
"""
tests True tags added.
"""
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.add_tags(
DomainName="testdomain", a="b", **conn_parameters
)
self.assertTrue(result["tagged"])
def test_that_when_adding_tags_fails_the_add_tags_method_returns_false(self):
"""
tests False tags not added.
"""
self.conn.add_tags.side_effect = ClientError(error_content, "add_tags")
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.add_tags(
DomainName=domain_ret["DomainName"], a="b", **conn_parameters
)
self.assertFalse(result["tagged"])
def test_that_when_removing_tags_succeeds_the_remove_tags_method_returns_true(self):
"""
tests True tags removed.
"""
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.remove_tags(
DomainName=domain_ret["DomainName"], TagKeys=["a"], **conn_parameters
)
self.assertTrue(result["tagged"])
def test_that_when_removing_tags_fails_the_remove_tags_method_returns_false(self):
"""
tests False tags not removed.
"""
self.conn.remove_tags.side_effect = ClientError(error_content, "remove_tags")
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.remove_tags(
DomainName=domain_ret["DomainName"], TagKeys=["b"], **conn_parameters
)
self.assertFalse(result["tagged"])
def test_that_when_listing_tags_succeeds_the_list_tags_method_returns_true(self):
"""
tests True tags listed.
"""
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.list_tags(
DomainName=domain_ret["DomainName"], **conn_parameters
)
self.assertEqual(result["tags"], {})
def test_that_when_listing_tags_fails_the_list_tags_method_returns_false(self):
"""
tests False tags not listed.
"""
self.conn.list_tags.side_effect = ClientError(error_content, "list_tags")
self.conn.describe_elasticsearch_domain.return_value = {
"DomainStatus": domain_ret
}
result = boto_elasticsearch_domain.list_tags(
DomainName=domain_ret["DomainName"], **conn_parameters
)
self.assertTrue(result["error"])
| saltstack/salt | tests/unit/modules/test_boto_elasticsearch_domain.py | Python | apache-2.0 | 13,405 |
import unittest
import xml.parsers.expat
from .. import xmlcore
def is_valid_xml_data(data):
try:
xml.parsers.expat.ParserCreate("utf-8").Parse(data)
except xml.parsers.expat.ExpatError:
return False
return True
class TestElementParser(unittest.TestCase):
def test_text_inside_an_element_should_become_the_text_of_the_element(self):
parser = xmlcore.ElementParser()
for a in parser.feed("<root><a><b>test</b></a>"):
self.assertEqual(a.name, u"a")
self.assertEqual(a.text, u"")
self.assertEqual(a.tail, u"")
children = list(a.children())
self.assertEqual(len(children), 1)
self.assertEqual(children[0].name, u"b")
self.assertEqual(children[0].text, u"test")
self.assertEqual(children[0].tail, u"")
def test_text_before_the_first_child_element_should_be_the_text_of_the_parent(self):
parser = xmlcore.ElementParser()
for a in parser.feed("<root><a>test<b /></a>"):
self.assertEqual(a.name, u"a")
self.assertEqual(a.text, u"test")
self.assertEqual(a.tail, u"")
children = list(a.children())
self.assertEqual(len(children), 1)
self.assertEqual(children[0].name, u"b")
self.assertEqual(children[0].text, u"")
self.assertEqual(children[0].tail, u"")
def test_text_after_the_last_child_element_should_be_the_tail_of_the_child_element(self):
parser = xmlcore.ElementParser()
for a in parser.feed("<root><a><b />test</a>"):
self.assertEqual(a.name, u"a")
self.assertEqual(a.text, u"")
self.assertEqual(a.tail, u"")
self.assertEqual(len(a.children()), 1)
children = list(a.children())
self.assertEqual(len(children), 1)
self.assertEqual(children[0].name, u"b")
self.assertEqual(children[0].text, u"")
self.assertEqual(children[0].tail, u"test")
def test_text_between_two_child_elements_should_be_the_tail_of_the_first_child_element(self):
parser = xmlcore.ElementParser()
for a in parser.feed("<root><a><b />test<c /></a>"):
self.assertEqual(a.text, u"")
self.assertEqual(a.tail, u"")
children = list(a.children())
self.assertEqual(len(children), 2)
self.assertEqual(children[0].name, u"b")
self.assertEqual(children[0].text, u"")
self.assertEqual(children[0].tail, u"test")
self.assertEqual(children[1].name, u"c")
self.assertEqual(children[1].text, u"")
self.assertEqual(children[1].tail, u"")
class TestCharacterSet(unittest.TestCase):
NON_XML_RANGES = [(0x0, 0x9), (0xb, 0xd), (0xe, 0x20), (0xd800, 0xe000), (0xfffe, 0x10000)]
NON_XML_STRINGS = [unichr(x) for start, end in NON_XML_RANGES for x in xrange(start, end)]
NON_XML_STRINGS += [u"\ud800\ud800", u"\ud800\U00100000"]
def test_raise_on_non_xml_chars_in_text(self):
for x in self.NON_XML_STRINGS:
element = xmlcore.Element("name")
self.assertRaises(ValueError, setattr, element, "text", x)
def test_accept_wide_unicode_chars_in_text(self):
element = xmlcore.Element("name")
element.text = u"\U00100000"
assert is_valid_xml_data(element.serialize())
element = xmlcore.Element("name")
element.text = u"\udbc0\udc00"
assert is_valid_xml_data(element.serialize())
def test_raise_on_non_xml_chars_in_tail(self):
for x in self.NON_XML_STRINGS:
element = xmlcore.Element("name")
self.assertRaises(ValueError, setattr, element, "tail", x)
def test_accept_wide_unicode_chars_in_tail(self):
element = xmlcore.Element("name")
wide = xmlcore.Element("inner")
wide.tail = u"\U00100000"
element.add(wide)
surrogate = xmlcore.Element("surrogate")
surrogate.tail = u"\udbc0\udc00"
element.add(surrogate)
assert is_valid_xml_data(element.serialize())
def test_raise_on_non_xml_chars_in_name(self):
for x in self.NON_XML_STRINGS:
self.assertRaises(ValueError, xmlcore.Element, x)
def test_raise_on_non_xml_chars_in_attr_key(self):
for x in self.NON_XML_STRINGS:
element = xmlcore.Element("name")
self.assertRaises(ValueError, element.set_attr, x, "value")
def test_raise_on_non_xml_chars_in_attr_value(self):
for x in self.NON_XML_STRINGS:
element = xmlcore.Element("name")
self.assertRaises(ValueError, element.set_attr, "key", x)
class TestEncoding(unittest.TestCase):
def test_escape(self):
element = xmlcore.Element("name")
element.text = "<&>"
assert is_valid_xml_data(element.serialize())
class TestElementNamespaces(unittest.TestCase):
def test_default_ns(self):
element = xmlcore.Element("name", xmlns="default_ns")
element.set_attr("xmlns:other", "other_ns")
assert element.name == "name"
assert element.ns == "default_ns"
def test_non_default_ns(self):
element = xmlcore.Element("other:name", xmlns="default_ns")
element.set_attr("xmlns:other", "other_ns")
assert element.name == "name"
assert element.ns == "other_ns"
def test_default_ns_inheritance(self):
parent = xmlcore.Element("parent", xmlns="default_ns")
child = xmlcore.Element("child")
parent.add(child)
assert child.name == "child"
assert child.ns == "default_ns"
def test_non_default_ns_inheritance(self):
parent = xmlcore.Element("parent", xmlns="default_ns")
parent.set_attr("xmlns:other", "other_ns")
child = xmlcore.Element("other:child")
parent.add(child)
assert child.name == "child"
assert child.ns == "other_ns"
def test_default_ns_inheritance_vs_gc(self):
import gc
parent = xmlcore.Element("parent", xmlns="default_ns")
child = xmlcore.Element("child")
parent.add(child)
del parent
gc.collect()
assert child.ns == "default_ns"
assert not child.with_attrs("xmlns")
| abusesa/idiokit | idiokit/tests/test_xmlcore.py | Python | mit | 6,318 |
class A(object):
pass
class B(A):
pass
def some(a):
"""
:type a: sample.supertypes.original.A
:rtype: list of sample.supertypes.original.A
"""
return [a]
def another(a):
"""
:type a: dict of sample.supertypes.original.A,int
:rtype: dict of sample.supertypes.original.A,int
"""
return a
def yet_another(a):
"""
:type a: dict of int,sample.supertypes.original.A
:rtype: dict of int,sample.supertypes.original.A
"""
return a
def empty(a):
"""
:type a: dict of sample.supertypes.original.A,int
:rtype: dict of sample.supertypes.original.A,int
"""
return a
| markuswissinger/ducktestpy | sample/supertypes/expected_sphinx.py | Python | apache-2.0 | 650 |
cancelService = True
cancelServiceOnAnniversaryDate = True
cancelItem = True
| cloudify-cosmo/softlayer-python | SoftLayer/testing/fixtures/SoftLayer_Billing_Item.py | Python | mit | 77 |
#!/usr/bin/env python
"""
A convenience wrapper around mysql connector.
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
import includes.mysql.connector as connector
import data.database
# refactored to remove duplicate code while
# providing same interface as before.
class Database(data.database.Database):
pass
| snim2mirror/openihm | src/openihm/model/database.py | Python | lgpl-3.0 | 949 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
name_index = sqlalchemy.Index('ix_stack_owner_id', stack.c.owner_id,
mysql_length=36)
name_index.create(migrate_engine)
| cwolferh/heat-scratch | heat/db/sqlalchemy/migrate_repo/versions/071_stack_owner_id_index.py | Python | apache-2.0 | 897 |
from pyramid.view import view_config
# @view_config(route_name='home', renderer='templates/mytemplate.pt')
@view_config(route_name='home', renderer='templates/index.pt')
def my_view(request):
return {'project': 'KatFud'}
| ferdy-lw/katfud-5000 | KatFud/katfud/views.py | Python | gpl-3.0 | 227 |
from skrf.vi.vna import keysight_pna
class Analyzer(keysight_pna.PNAX):
DEFAULT_VISA_ADDRESS = "GPIB::16::INSTR"
NAME = "Agilent N5247A"
NPORTS = 4
NCHANNELS = 32
SCPI_VERSION_TESTED = 'A.09.80.20'
| temmeand/scikit-rf | qtapps/skrf_qtwidgets/analyzers/analyzer_agilent_n5247a.py | Python | bsd-3-clause | 220 |
import typing
import pytest
import abjad
values: typing.List[typing.Tuple] = []
values.extend(
[
(-24, -12, "-P8"),
(-23, -11, "-M7"),
(-22, -10, "-m7"),
(-21, -9, "-M6"),
(-20, -8, "-m6"),
(-19, -7, "-P5"),
(-18, -6, "-d5"),
(-17, -5, "-P4"),
(-16, -4, "-M3"),
(-15, -3, "-m3"),
(-14, -2, "-M2"),
(-13, -1, "-m2"),
(-12, -12, "-P8"),
(-11, -11, "-M7"),
(-10, -10, "-m7"),
(-9, -9, "-M6"),
(-8, -8, "-m6"),
(-7, -7, "-P5"),
(-6, -6, "-d5"),
(-5, -5, "-P4"),
(-4, -4, "-M3"),
(-3, -3, "-m3"),
(-2, -2, "-M2"),
(-1, -1, "-m2"),
(0, 0, "P1"),
(1, 1, "+m2"),
(2, 2, "+M2"),
(3, 3, "+m3"),
(4, 4, "+M3"),
(5, 5, "+P4"),
(6, 6, "+d5"),
(7, 7, "+P5"),
(8, 8, "+m6"),
(9, 9, "+M6"),
(10, 10, "+m7"),
(11, 11, "+M7"),
(12, 12, "+P8"),
(13, 1, "+m2"),
(14, 2, "+M2"),
(15, 3, "+m3"),
(16, 4, "+M3"),
(17, 5, "+P4"),
(18, 6, "+d5"),
(19, 7, "+P5"),
(20, 8, "+m6"),
(21, 9, "+M6"),
(22, 10, "+m7"),
(23, 11, "+M7"),
(24, 12, "+P8"),
]
)
values.extend(
[
("-A1", -1, "-A1"),
("-A10", -5, "-A3"),
("-A11", -6, "-A4"),
("-A12", -8, "-A5"),
("-A13", -10, "-A6"),
("-A14", -12, "-A7"),
("-A2", -3, "-A2"),
("-A3", -5, "-A3"),
("-A4", -6, "-A4"),
("-A5", -8, "-A5"),
("-A6", -10, "-A6"),
("-A7", -12, "-A7"),
("-A8", -1, "-A1"),
("-A9", -3, "-A2"),
("-AA1", -2, "-AA1"),
("-AA10", -6, "-AA3"),
("-AA11", -7, "-AA4"),
("-AA12", -9, "-AA5"),
("-AA13", -11, "-AA6"),
("-AA14", -1, "-AA7"),
("-AA2", -4, "-AA2"),
("-AA3", -6, "-AA3"),
("-AA4", -7, "-AA4"),
("-AA5", -9, "-AA5"),
("-AA6", -11, "-AA6"),
("-AA7", -1, "-AA7"),
("-AA8", -2, "-AA1"),
("-AA9", -4, "-AA2"),
("-AAA1", -3, "-AAA1"),
("-AAA10", -7, "-AAA3"),
("-AAA11", -8, "-AAA4"),
("-AAA12", -10, "-AAA5"),
("-AAA13", -12, "-AAA6"),
("-AAA14", -2, "-AAA7"),
("-AAA2", -5, "-AAA2"),
("-AAA3", -7, "-AAA3"),
("-AAA4", -8, "-AAA4"),
("-AAA5", -10, "-AAA5"),
("-AAA6", -12, "-AAA6"),
("-AAA7", -2, "-AAA7"),
("-AAA8", -3, "-AAA1"),
("-AAA9", -5, "-AAA2"),
("-M10", -4, "-M3"),
("-M13", -9, "-M6"),
("-M2", -2, "-M2"),
("-M3", -4, "-M3"),
("-M6", -9, "-M6"),
("-M7", -11, "-M7"),
("-M9", -2, "-M2"),
("-P1", -0, "P1"),
("-P11", -5, "-P4"),
("-P12", -7, "-P5"),
("-P15", -12, "-P8"),
("-P4", -5, "-P4"),
("-P5", -7, "-P5"),
("-P8", -12, "-P8"),
("-d1", -1, "-d1"),
("-d10", -2, "-d3"),
("-d11", -4, "-d4"),
("-d12", -6, "-d5"),
("-d13", -7, "-d6"),
("-d14", -9, "-d7"),
("-d2", -0, "-d2"),
("-d3", -2, "-d3"),
("-d4", -4, "-d4"),
("-d5", -6, "-d5"),
("-d6", -7, "-d6"),
("-d7", -9, "-d7"),
("-d8", 1, "+d1"),
("-d9", 0, "-d2"),
("-dd1", -2, "-dd1"),
("-dd10", -1, "-dd3"),
("-dd11", -3, "-dd4"),
("-dd12", -5, "-dd5"),
("-dd13", -6, "-dd6"),
("-dd14", -8, "-dd7"),
("-dd2", 1, "-dd2"),
("-dd3", -1, "-dd3"),
("-dd4", -3, "-dd4"),
("-dd5", -5, "-dd5"),
("-dd6", -6, "-dd6"),
("-dd7", -8, "-dd7"),
("-dd8", 2, "+dd1"),
("-dd9", 1, "-dd2"),
("-ddd1", -3, "-ddd1"),
("-ddd11", -2, "-ddd4"),
("-ddd12", -4, "-ddd5"),
("-ddd13", -5, "-ddd6"),
("-ddd14", -7, "-ddd7"),
("-ddd2", 2, "-ddd2"),
("-ddd3", 0, "-ddd3"),
("-ddd4", -2, "-ddd4"),
("-ddd5", -4, "-ddd5"),
("-ddd6", -5, "-ddd6"),
("-ddd7", -7, "-ddd7"),
("-ddd8", 3, "+ddd1"),
("-ddd9", 2, "-ddd2"),
("-m10", -3, "-m3"),
("-m13", -8, "-m6"),
("-m14", -10, "-m7"),
("-m2", -1, "-m2"),
("-m3", -3, "-m3"),
("-m6", -8, "-m6"),
("-m7", -10, "-m7"),
("-m9", -1, "-m2"),
("A1", 1, "+A1"),
("A10", 5, "+A3"),
("A11", 6, "+A4"),
("A12", 8, "+A5"),
("A13", 10, "+A6"),
("A14", 12, "+A7"),
("A2", 3, "+A2"),
("A3", 5, "+A3"),
("A4", 6, "+A4"),
("A5", 8, "+A5"),
("A6", 10, "+A6"),
("A7", 12, "+A7"),
("A8", 1, "+A1"),
("A9", 3, "+A2"),
("AA1", 2, "+AA1"),
("AA10", 6, "+AA3"),
("AA11", 7, "+AA4"),
("AA12", 9, "+AA5"),
("AA13", 11, "+AA6"),
("AA14", 1, "+AA7"),
("AA2", 4, "+AA2"),
("AA3", 6, "+AA3"),
("AA4", 7, "+AA4"),
("AA5", 9, "+AA5"),
("AA6", 11, "+AA6"),
("AA7", 1, "+AA7"),
("AA8", 2, "+AA1"),
("AA9", 4, "+AA2"),
("AAA1", 3, "+AAA1"),
("AAA10", 7, "+AAA3"),
("AAA11", 8, "+AAA4"),
("AAA12", 10, "+AAA5"),
("AAA13", 12, "+AAA6"),
("AAA14", 2, "+AAA7"),
("AAA2", 5, "+AAA2"),
("AAA3", 7, "+AAA3"),
("AAA4", 8, "+AAA4"),
("AAA5", 10, "+AAA5"),
("AAA6", 12, "+AAA6"),
("AAA7", 2, "+AAA7"),
("AAA8", 3, "+AAA1"),
("AAA9", 5, "+AAA2"),
("M10", 4, "+M3"),
("M13", 9, "+M6"),
("M14", 11, "+M7"),
("M2", 2, "+M2"),
("M3", 4, "+M3"),
("M6", 9, "+M6"),
("M7", 11, "+M7"),
("M9", 2, "+M2"),
("P1", 0, "P1"),
("P11", 5, "+P4"),
("P12", 7, "+P5"),
("P15", 12, "+P8"),
("P4", 5, "+P4"),
("P5", 7, "+P5"),
("P8", 12, "+P8"),
("d1", 1, "+d1"),
("d10", 2, "+d3"),
("d11", 4, "+d4"),
("d12", 6, "+d5"),
("d13", 7, "+d6"),
("d14", 9, "+d7"),
("d2", 0, "+d2"),
("d3", 2, "+d3"),
("d4", 4, "+d4"),
("d5", 6, "+d5"),
("d6", 7, "+d6"),
("d7", 9, "+d7"),
("d8", -1, "-d1"),
("d9", 0, "+d2"),
("dd1", 2, "+dd1"),
("dd10", 1, "+dd3"),
("dd11", 3, "+dd4"),
("dd12", 5, "+dd5"),
("dd13", 6, "+dd6"),
("dd14", 8, "+dd7"),
("dd2", -1, "+dd2"),
("dd3", 1, "+dd3"),
("dd4", 3, "+dd4"),
("dd5", 5, "+dd5"),
("dd6", 6, "+dd6"),
("dd7", 8, "+dd7"),
("dd8", -2, "-dd1"),
("dd9", -1, "+dd2"),
("ddd1", 3, "+ddd1"),
("ddd10", 0, "+ddd3"),
("ddd11", 2, "+ddd4"),
("ddd12", 4, "+ddd5"),
("ddd13", 5, "+ddd6"),
("ddd14", 7, "+ddd7"),
("ddd2", -2, "+ddd2"),
("ddd3", 0, "+ddd3"),
("ddd4", 2, "+ddd4"),
("ddd5", 4, "+ddd5"),
("ddd6", 5, "+ddd6"),
("ddd7", 7, "+ddd7"),
("ddd8", -3, "-ddd1"),
("ddd9", -2, "+ddd2"),
("m10", 3, "+m3"),
("m13", 8, "+m6"),
("m14", 10, "+m7"),
("m2", 1, "+m2"),
("m3", 3, "+m3"),
("m6", 8, "+m6"),
("m7", 10, "+m7"),
("m9", 1, "+m2"),
]
)
values.extend(
[
(("M", 1), ValueError, None),
(("M", 4), ValueError, None),
(("M", 5), ValueError, None),
(("P", 2), ValueError, None),
(("P", 3), ValueError, None),
(("P", 6), ValueError, None),
(("P", 7), ValueError, None),
(("m", 1), ValueError, None),
(("m", 4), ValueError, None),
(("m", 5), ValueError, None),
]
)
@pytest.mark.parametrize("input_, semitones, name", values)
def test_init(input_, semitones, name):
class_ = abjad.NamedIntervalClass
if isinstance(semitones, type) and issubclass(semitones, Exception):
with pytest.raises(semitones):
class_(input_)
return
instance = class_(input_)
assert float(instance) == semitones
assert instance.name == name
abjad.NamedInterval(instance)
abjad.NamedIntervalClass(instance)
abjad.NumberedInterval(instance)
abjad.NumberedIntervalClass(instance)
| Abjad/abjad | tests/test_NamedIntervalClass.py | Python | gpl-3.0 | 8,475 |
#
# randpool.py : Cryptographically strong random number generation
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $"
import time, array, types, warnings, os.path
from number import long_to_bytes
try:
import Crypto.Util.winrandom as winrandom
except:
winrandom = None
STIRNUM = 3
class RandomPool:
"""randpool.py : Cryptographically strong random number generation.
The implementation here is similar to the one in PGP. To be
cryptographically strong, it must be difficult to determine the RNG's
output, whether in the future or the past. This is done by using
a cryptographic hash function to "stir" the random data.
Entropy is gathered in the same fashion as PGP; the highest-resolution
clock around is read and the data is added to the random number pool.
A conservative estimate of the entropy is then kept.
If a cryptographically secure random source is available (/dev/urandom
on many Unixes, Windows CryptGenRandom on most Windows), then use
it.
Instance Attributes:
bits : int
Maximum size of pool in bits
bytes : int
Maximum size of pool in bytes
entropy : int
Number of bits of entropy in this pool.
Methods:
add_event([s]) : add some entropy to the pool
get_bytes(int) : get N bytes of random data
randomize([N]) : get N bytes of randomness from external source
"""
def __init__(self, numbytes = 160, cipher=None, hash=None):
if hash is None:
from hashlib import sha1 as hash
# The cipher argument is vestigial; it was removed from
# version 1.1 so RandomPool would work even in the limited
# exportable subset of the code
if cipher is not None:
warnings.warn("'cipher' parameter is no longer used")
if isinstance(hash, types.StringType):
# ugly hack to force __import__ to give us the end-path module
hash = __import__('Crypto.Hash.'+hash,
None, None, ['new'])
warnings.warn("'hash' parameter should now be a hashing module")
self.bytes = numbytes
self.bits = self.bytes*8
self.entropy = 0
self._hash = hash
# Construct an array to hold the random pool,
# initializing it to 0.
self._randpool = array.array('B', [0]*self.bytes)
self._event1 = self._event2 = 0
self._addPos = 0
self._getPos = hash().digest_size
self._lastcounter=time.time()
self.__counter = 0
self._measureTickSize() # Estimate timer resolution
self._randomize()
def _updateEntropyEstimate(self, nbits):
self.entropy += nbits
if self.entropy < 0:
self.entropy = 0
elif self.entropy > self.bits:
self.entropy = self.bits
def _randomize(self, N = 0, devname = '/dev/urandom'):
"""_randomize(N, DEVNAME:device-filepath)
collects N bits of randomness from some entropy source (e.g.,
/dev/urandom on Unixes that have it, Windows CryptoAPI
CryptGenRandom, etc)
DEVNAME is optional, defaults to /dev/urandom. You can change it
to /dev/random if you want to block till you get enough
entropy.
"""
data = ''
if N <= 0:
nbytes = int((self.bits - self.entropy)/8+0.5)
else:
nbytes = int(N/8+0.5)
if winrandom:
# Windows CryptGenRandom provides random data.
data = winrandom.new().get_bytes(nbytes)
# GAE fix, benadida
#elif os.path.exists(devname):
# # Many OSes support a /dev/urandom device
# try:
# f=open(devname)
# data=f.read(nbytes)
# f.close()
# except IOError, (num, msg):
# if num!=2: raise IOError, (num, msg)
# # If the file wasn't found, ignore the error
if data:
self._addBytes(data)
# Entropy estimate: The number of bits of
# data obtained from the random source.
self._updateEntropyEstimate(8*len(data))
self.stir_n() # Wash the random pool
def randomize(self, N=0):
"""randomize(N:int)
use the class entropy source to get some entropy data.
This is overridden by KeyboardRandomize().
"""
return self._randomize(N)
def stir_n(self, N = STIRNUM):
"""stir_n(N)
stirs the random pool N times
"""
for i in xrange(N):
self.stir()
def stir (self, s = ''):
"""stir(s:string)
Mix up the randomness pool. This will call add_event() twice,
but out of paranoia the entropy attribute will not be
increased. The optional 's' parameter is a string that will
be hashed with the randomness pool.
"""
entropy=self.entropy # Save inital entropy value
self.add_event()
# Loop over the randomness pool: hash its contents
# along with a counter, and add the resulting digest
# back into the pool.
for i in range(self.bytes / self._hash().digest_size):
h = self._hash(self._randpool)
h.update(str(self.__counter) + str(i) + str(self._addPos) + s)
self._addBytes( h.digest() )
self.__counter = (self.__counter + 1) & 0xFFFFffffL
self._addPos, self._getPos = 0, self._hash().digest_size
self.add_event()
# Restore the old value of the entropy.
self.entropy=entropy
def get_bytes (self, N):
"""get_bytes(N:int) : string
Return N bytes of random data.
"""
s=''
i, pool = self._getPos, self._randpool
h=self._hash()
dsize = self._hash().digest_size
num = N
while num > 0:
h.update( self._randpool[i:i+dsize] )
s = s + h.digest()
num = num - dsize
i = (i + dsize) % self.bytes
if i<dsize:
self.stir()
i=self._getPos
self._getPos = i
self._updateEntropyEstimate(- 8*N)
return s[:N]
def add_event(self, s=''):
"""add_event(s:string)
Add an event to the random pool. The current time is stored
between calls and used to estimate the entropy. The optional
's' parameter is a string that will also be XORed into the pool.
Returns the estimated number of additional bits of entropy gain.
"""
event = time.time()*1000
delta = self._noise()
s = (s + long_to_bytes(event) +
4*chr(0xaa) + long_to_bytes(delta) )
self._addBytes(s)
if event==self._event1 and event==self._event2:
# If events are coming too closely together, assume there's
# no effective entropy being added.
bits=0
else:
# Count the number of bits in delta, and assume that's the entropy.
bits=0
while delta:
delta, bits = delta>>1, bits+1
if bits>8: bits=8
self._event1, self._event2 = event, self._event1
self._updateEntropyEstimate(bits)
return bits
# Private functions
def _noise(self):
# Adds a bit of noise to the random pool, by adding in the
# current time and CPU usage of this process.
# The difference from the previous call to _noise() is taken
# in an effort to estimate the entropy.
t=time.time()
delta = (t - self._lastcounter)/self._ticksize*1e6
self._lastcounter = t
self._addBytes(long_to_bytes(long(1000*time.time())))
self._addBytes(long_to_bytes(long(1000*time.clock())))
self._addBytes(long_to_bytes(long(1000*time.time())))
self._addBytes(long_to_bytes(long(delta)))
# Reduce delta to a maximum of 8 bits so we don't add too much
# entropy as a result of this call.
delta=delta % 0xff
return int(delta)
def _measureTickSize(self):
# _measureTickSize() tries to estimate a rough average of the
# resolution of time that you can see from Python. It does
# this by measuring the time 100 times, computing the delay
# between measurements, and taking the median of the resulting
# list. (We also hash all the times and add them to the pool)
interval = [None] * 100
h = self._hash(`(id(self),id(interval))`)
# Compute 100 differences
t=time.time()
h.update(`t`)
i = 0
j = 0
while i < 100:
t2=time.time()
h.update(`(i,j,t2)`)
j += 1
delta=int((t2-t)*1e6)
if delta:
interval[i] = delta
i += 1
t=t2
# Take the median of the array of intervals
interval.sort()
self._ticksize=interval[len(interval)/2]
h.update(`(interval,self._ticksize)`)
# mix in the measurement times and wash the random pool
self.stir(h.digest())
def _addBytes(self, s):
"XOR the contents of the string S into the random pool"
i, pool = self._addPos, self._randpool
for j in range(0, len(s)):
pool[i]=pool[i] ^ ord(s[j])
i=(i+1) % self.bytes
self._addPos = i
# Deprecated method names: remove in PCT 2.1 or later.
def getBytes(self, N):
warnings.warn("getBytes() method replaced by get_bytes()",
DeprecationWarning)
return self.get_bytes(N)
def addEvent (self, event, s=""):
warnings.warn("addEvent() method replaced by add_event()",
DeprecationWarning)
return self.add_event(s + str(event))
class PersistentRandomPool (RandomPool):
def __init__ (self, filename=None, *args, **kwargs):
RandomPool.__init__(self, *args, **kwargs)
self.filename = filename
if filename:
try:
# the time taken to open and read the file might have
# a little disk variability, modulo disk/kernel caching...
f=open(filename, 'rb')
self.add_event()
data = f.read()
self.add_event()
# mix in the data from the file and wash the random pool
self.stir(data)
f.close()
except IOError:
# Oh, well; the file doesn't exist or is unreadable, so
# we'll just ignore it.
pass
def save(self):
if self.filename == "":
raise ValueError, "No filename set for this object"
# wash the random pool before save, provides some forward secrecy for
# old values of the pool.
self.stir_n()
f=open(self.filename, 'wb')
self.add_event()
f.write(self._randpool.tostring())
f.close()
self.add_event()
# wash the pool again, provide some protection for future values
self.stir()
# non-echoing Windows keyboard entry
_kb = 0
if not _kb:
try:
import msvcrt
class KeyboardEntry:
def getch(self):
c = msvcrt.getch()
if c in ('\000', '\xe0'):
# function key
c += msvcrt.getch()
return c
def close(self, delay = 0):
if delay:
time.sleep(delay)
while msvcrt.kbhit():
msvcrt.getch()
_kb = 1
except:
pass
# non-echoing Posix keyboard entry
if not _kb:
try:
import termios
class KeyboardEntry:
def __init__(self, fd = 0):
self._fd = fd
self._old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3]=new[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new)
def getch(self):
termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in?
return os.read(self._fd, 1)
def close(self, delay = 0):
if delay:
time.sleep(delay)
termios.tcflush(self._fd, termios.TCIFLUSH)
termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old)
_kb = 1
except:
pass
class KeyboardRandomPool (PersistentRandomPool):
def __init__(self, *args, **kwargs):
PersistentRandomPool.__init__(self, *args, **kwargs)
def randomize(self, N = 0):
"Adds N bits of entropy to random pool. If N is 0, fill up pool."
import os, string, time
if N <= 0:
bits = self.bits - self.entropy
else:
bits = N*8
if bits == 0:
return
print bits,'bits of entropy are now required. Please type on the keyboard'
print 'until enough randomness has been accumulated.'
kb = KeyboardEntry()
s='' # We'll save the characters typed and add them to the pool.
hash = self._hash
e = 0
try:
while e < bits:
temp=str(bits-e).rjust(6)
os.write(1, temp)
s=s+kb.getch()
e += self.add_event(s)
os.write(1, 6*chr(8))
self.add_event(s+hash.new(s).digest() )
finally:
kb.close()
print '\n\007 Enough. Please wait a moment.\n'
self.stir_n() # wash the random pool.
kb.close(4)
if __name__ == '__main__':
pool = RandomPool()
print 'random pool entropy', pool.entropy, 'bits'
pool.add_event('something')
print `pool.get_bytes(100)`
import tempfile, os
fname = tempfile.mktemp()
pool = KeyboardRandomPool(filename=fname)
print 'keyboard random pool entropy', pool.entropy, 'bits'
pool.randomize()
print 'keyboard random pool entropy', pool.entropy, 'bits'
pool.randomize(128)
pool.save()
saved = open(fname, 'rb').read()
print 'saved', `saved`
print 'pool ', `pool._randpool.tostring()`
newpool = PersistentRandomPool(fname)
print 'persistent random pool entropy', pool.entropy, 'bits'
os.remove(fname)
| stanley89/helios-server | helios/crypto/randpool.py | Python | apache-2.0 | 14,786 |
""" Tests for tinypages build using sphinx extensions """
import filecmp
from os.path import join as pjoin, dirname, isdir
import shutil
from subprocess import call, Popen, PIPE
import sys
import tempfile
import pytest
from matplotlib import cbook
HERE = dirname(__file__)
TINY_PAGES = pjoin(HERE, 'tinypages')
def setup_module():
"""Check we have a recent enough version of sphinx installed.
"""
ret = call([sys.executable, '-msphinx', '--help'],
stdout=PIPE, stderr=PIPE)
if ret != 0:
raise RuntimeError(
"'{} -msphinx' does not return 0".format(sys.executable))
@cbook.deprecated("2.1", alternative="filecmp.cmp")
def file_same(file1, file2):
with open(file1, 'rb') as fobj:
contents1 = fobj.read()
with open(file2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2
class TestTinyPages(object):
"""Test build and output of tinypages project"""
@classmethod
def setup_class(cls):
cls.page_build = tempfile.mkdtemp()
try:
cls.html_dir = pjoin(cls.page_build, 'html')
cls.doctree_dir = pjoin(cls.page_build, 'doctrees')
# Build the pages with warnings turned into errors
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html',
'-d', cls.doctree_dir,
TINY_PAGES,
cls.html_dir]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError(
"'{} -msphinx' failed with stdout:\n{}\nstderr:\n{}\n"
.format(sys.executable, out, err))
except Exception as e:
shutil.rmtree(cls.page_build)
raise e
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.page_build)
def test_some_plots(self):
assert isdir(self.html_dir)
def plot_file(num):
return pjoin(self.html_dir, 'some_plots-{0}.png'.format(num))
range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
# Plot 5 is range(6) plot
assert filecmp.cmp(range_6, plot_file(5))
# Plot 7 is range(4) plot
assert filecmp.cmp(range_4, plot_file(7))
# Plot 11 is range(10) plot
assert filecmp.cmp(range_10, plot_file(11))
# Plot 12 uses the old range(10) figure and the new range(6) figure
assert filecmp.cmp(range_10, plot_file('12_00'))
assert filecmp.cmp(range_6, plot_file('12_01'))
# Plot 13 shows close-figs in action
assert filecmp.cmp(range_4, plot_file(13))
# Plot 14 has included source
with open(pjoin(self.html_dir, 'some_plots.html'), 'rb') as fobj:
html_contents = fobj.read()
assert b'# Only a comment' in html_contents
# check plot defined in external file.
assert filecmp.cmp(range_4, pjoin(self.html_dir, 'range4.png'))
assert filecmp.cmp(range_6, pjoin(self.html_dir, 'range6.png'))
# check if figure caption made it into html file
assert b'This is the caption for plot 15.' in html_contents
| jonyroda97/redbot-amigosprovaveis | lib/matplotlib/sphinxext/tests/test_tinypages.py | Python | gpl-3.0 | 3,203 |
# Copyright 2010 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import larch
class FrozenNodeTests(unittest.TestCase):
def test_node_id_is_in_error_message(self):
node = larch.nodes.Node(123, [], [])
e = larch.FrozenNode(node)
self.assert_('123' in str(e))
class NodeTests(unittest.TestCase):
def setUp(self):
self.node_id = 12765
self.pairs = [('key2', 'value2'), ('key1', 'value1')]
self.pairs.sort()
self.keys = [k for k, v in self.pairs]
self.values = [v for k, v in self.pairs]
self.node = larch.nodes.Node(self.node_id, self.keys, self.values)
def test_has_id(self):
self.assertEqual(self.node.id, self.node_id)
def test_empty_node_is_still_true(self):
empty = larch.nodes.Node(self.node_id, [], [])
self.assert_(empty)
def test_has_no_size(self):
self.assertEqual(self.node.size, None)
def test_has_each_pair(self):
for key, value in self.pairs:
self.assertEqual(self.node[key], value)
def test_raises_keyerror_for_missing_key(self):
self.assertRaises(KeyError, self.node.__getitem__, 'notexist')
def test_contains_each_key(self):
for key, value in self.pairs:
self.assert_(key in self.node)
def test_does_not_contain_wrong_key(self):
self.assertFalse('notexist' in self.node)
def test_is_equal_to_itself(self):
self.assert_(self.node == self.node)
def test_iterates_over_all_keys(self):
self.assertEqual([k for k in self.node],
sorted(k for k, v in self.pairs))
def test_has_correct_length(self):
self.assertEqual(len(self.node), len(self.pairs))
def test_has_keys(self):
self.assertEqual(self.node.keys(), sorted(k for k, v in self.pairs))
def test_sorts_keys(self):
self.assertEqual(self.node.keys(), sorted(k for k, v in self.pairs))
def test_has_values(self):
self.assertEqual(self.node.values(),
[v for k, v in sorted(self.pairs)])
def test_returns_correct_first_key(self):
self.assertEqual(self.node.first_key(), 'key1')
def test_returns_keys_and_values(self):
self.assertEqual(self.node.keys(), self.keys)
self.assertEqual(self.node.values(), self.values)
def test_adds_key_value_pair_to_empty_node(self):
node = larch.nodes.Node(0, [], [])
node.add('foo', 'bar')
self.assertEqual(node.keys(), ['foo'])
self.assertEqual(node.values(), ['bar'])
self.assertEqual(node['foo'], 'bar')
def test_adds_key_value_pair_to_end_of_node_of_one_element(self):
node = larch.nodes.Node(0, ['foo'], ['bar'])
node.add('foo2', 'bar2')
self.assertEqual(node.keys(), ['foo', 'foo2'])
self.assertEqual(node.values(), ['bar', 'bar2'])
self.assertEqual(node['foo2'], 'bar2')
def test_adds_key_value_pair_to_beginning_of_node_of_one_element(self):
node = larch.nodes.Node(0, ['foo'], ['bar'])
node.add('bar', 'bar')
self.assertEqual(node.keys(), ['bar', 'foo'])
self.assertEqual(node.values(), ['bar', 'bar'])
self.assertEqual(node['bar'], 'bar')
def test_adds_key_value_pair_to_middle_of_node_of_two_elements(self):
node = larch.nodes.Node(0, ['bar', 'foo'], ['bar', 'bar'])
node.add('duh', 'bar')
self.assertEqual(node.keys(), ['bar', 'duh', 'foo'])
self.assertEqual(node.values(), ['bar', 'bar', 'bar'])
self.assertEqual(node['duh'], 'bar')
def test_add_replaces_value_for_existing_key(self):
node = larch.nodes.Node(0, ['bar', 'foo'], ['bar', 'bar'])
node.add('bar', 'xxx')
self.assertEqual(node.keys(), ['bar', 'foo'])
self.assertEqual(node.values(), ['xxx', 'bar'])
self.assertEqual(node['bar'], 'xxx')
def test_add_resets_cached_size(self):
node = larch.nodes.Node(0, [], [])
node.size = 1234
node.add('foo', 'bar')
self.assertEqual(node.size, None)
def test_removes_first_key(self):
node = larch.nodes.Node(0, ['bar', 'duh', 'foo'],
['bar', 'bar', 'bar'])
node.remove('bar')
self.assertEqual(node.keys(), ['duh', 'foo'])
self.assertEqual(node.values(), ['bar', 'bar'])
self.assertRaises(KeyError, node.__getitem__, 'bar')
def test_removes_last_key(self):
node = larch.nodes.Node(0, ['bar', 'duh', 'foo'],
['bar', 'bar', 'bar'])
node.remove('foo')
self.assertEqual(node.keys(), ['bar', 'duh'])
self.assertEqual(node.values(), ['bar', 'bar'])
self.assertRaises(KeyError, node.__getitem__, 'foo')
def test_removes_middle_key(self):
node = larch.nodes.Node(0, ['bar', 'duh', 'foo'],
['bar', 'bar', 'bar'])
node.remove('duh')
self.assertEqual(node.keys(), ['bar', 'foo'])
self.assertEqual(node.values(), ['bar', 'bar'])
self.assertRaises(KeyError, node.__getitem__, 'duh')
def test_raises_exception_when_removing_unknown_key(self):
node = larch.nodes.Node(0, ['bar', 'duh', 'foo'],
['bar', 'bar', 'bar'])
self.assertRaises(KeyError, node.remove, 'yo')
def test_remove_resets_cached_size(self):
node = larch.nodes.Node(0, ['foo'], ['bar'])
node.size = 1234
node.remove('foo')
self.assertEqual(node.size, None)
def test_removes_index_range(self):
node = larch.nodes.Node(0, ['bar', 'duh', 'foo'],
['bar', 'bar', 'bar'])
node.size = 12375654
node.remove_index_range(1, 5)
self.assertEqual(node.keys(), ['bar'])
self.assertEqual(node.values(), ['bar'])
self.assertEqual(node.size, None)
def test_finds_keys_in_range(self):
# The children's keys are 'bar' and 'foo'. We need to test for
# every combination of minkey and maxkey being less than, equal,
# or greater than either child key (as long as minkey <= maxkey).
node = larch.LeafNode(0, ['bar', 'foo'], ['bar', 'foo'])
find = node.find_keys_in_range
self.assertEqual(find('aaa', 'aaa'), [])
self.assertEqual(find('aaa', 'bar'), ['bar'])
self.assertEqual(find('aaa', 'ccc'), ['bar'])
self.assertEqual(find('aaa', 'foo'), ['bar', 'foo'])
self.assertEqual(find('aaa', 'ggg'), ['bar', 'foo'])
self.assertEqual(find('bar', 'bar'), ['bar'])
self.assertEqual(find('bar', 'ccc'), ['bar'])
self.assertEqual(find('bar', 'foo'), ['bar', 'foo'])
self.assertEqual(find('bar', 'ggg'), ['bar', 'foo'])
self.assertEqual(find('ccc', 'ccc'), [])
self.assertEqual(find('ccc', 'foo'), ['foo'])
self.assertEqual(find('ccc', 'ggg'), ['foo'])
self.assertEqual(find('foo', 'foo'), ['foo'])
self.assertEqual(find('foo', 'ggg'), ['foo'])
self.assertEqual(find('ggg', 'ggg'), [])
def test_finds_no_potential_range_in_empty_node(self):
node = larch.LeafNode(0, [], [])
self.assertEqual(node.find_potential_range('aaa', 'bbb'), (None, None))
def test_finds_potential_ranges(self):
# The children's keys are 'bar' and 'foo'. We need to test for
# every combination of minkey and maxkey being less than, equal,
# or greater than either child key (as long as minkey <= maxkey).
node = larch.LeafNode(0, ['bar', 'foo'], ['bar', 'foo'])
find = node.find_potential_range
self.assertEqual(find('aaa', 'aaa'), (None, None))
self.assertEqual(find('aaa', 'bar'), (0, 0))
self.assertEqual(find('aaa', 'ccc'), (0, 0))
self.assertEqual(find('aaa', 'foo'), (0, 1))
self.assertEqual(find('aaa', 'ggg'), (0, 1))
self.assertEqual(find('bar', 'bar'), (0, 0))
self.assertEqual(find('bar', 'ccc'), (0, 0))
self.assertEqual(find('bar', 'foo'), (0, 1))
self.assertEqual(find('bar', 'ggg'), (0, 1))
self.assertEqual(find('ccc', 'ccc'), (0, 0))
self.assertEqual(find('ccc', 'foo'), (0, 1))
self.assertEqual(find('ccc', 'ggg'), (0, 1))
self.assertEqual(find('foo', 'foo'), (1, 1))
self.assertEqual(find('foo', 'ggg'), (1, 1))
# This one is a bit special. The last key may refer to a
# child that is an index node, so it _might_ have keys
# in the desired range.
self.assertEqual(find('ggg', 'ggg'), (1, 1))
def test_is_not_frozen(self):
self.assertEqual(self.node.frozen, False)
def test_freezing_makes_add_raise_error(self):
self.node.frozen = True
self.assertRaises(larch.FrozenNode, self.node.add, 'foo', 'bar')
def test_freezing_makes_remove_raise_error(self):
self.node.frozen = True
self.assertRaises(larch.FrozenNode, self.node.remove, 'foo')
def test_freezing_makes_remove_index_range_raise_error(self):
self.node.frozen = True
self.assertRaises(larch.FrozenNode, self.node.remove_index_range, 0, 1)
class IndexNodeTests(unittest.TestCase):
def setUp(self):
self.leaf1 = larch.LeafNode(0, ['bar'], ['bar'])
self.leaf2 = larch.LeafNode(1, ['foo'], ['foo'])
self.index_id = 1234
self.index = larch.IndexNode(self.index_id, ['bar', 'foo'],
[self.leaf1.id, self.leaf2.id])
def test_find_key_for_child_containing(self):
find = self.index.find_key_for_child_containing
self.assertEqual(find('aaa'), None)
self.assertEqual(find('bar'), 'bar')
self.assertEqual(find('bar2'), 'bar')
self.assertEqual(find('foo'), 'foo')
self.assertEqual(find('foo2'), 'foo')
def test_returns_none_when_no_child_contains_key(self):
self.assertEqual(self.index.find_key_for_child_containing('a'), None)
def test_finds_no_key_when_node_is_empty(self):
empty = larch.IndexNode(0, [], [])
self.assertEqual(empty.find_key_for_child_containing('f00'), None)
def test_finds_no_children_in_range_when_empty(self):
empty = larch.IndexNode(0, [], [])
self.assertEqual(empty.find_children_in_range('bar', 'foo'), [])
def test_finds_children_in_ranges(self):
# The children's keys are 'bar' and 'foo'. We need to test for
# every combination of minkey and maxkey being less than, equal,
# or greater than either child key (as long as minkey <= maxkey).
find = self.index.find_children_in_range
bar = self.leaf1.id
foo = self.leaf2.id
self.assertEqual(find('aaa', 'aaa'), [])
self.assertEqual(find('aaa', 'bar'), [bar])
self.assertEqual(find('aaa', 'ccc'), [bar])
self.assertEqual(find('aaa', 'foo'), [bar, foo])
self.assertEqual(find('aaa', 'ggg'), [bar, foo])
self.assertEqual(find('bar', 'bar'), [bar])
self.assertEqual(find('bar', 'ccc'), [bar])
self.assertEqual(find('bar', 'foo'), [bar, foo])
self.assertEqual(find('bar', 'ggg'), [bar, foo])
self.assertEqual(find('ccc', 'ccc'), [bar])
self.assertEqual(find('ccc', 'foo'), [bar, foo])
self.assertEqual(find('ccc', 'ggg'), [bar, foo])
self.assertEqual(find('foo', 'foo'), [foo])
self.assertEqual(find('foo', 'ggg'), [foo])
self.assertEqual(find('ggg', 'ggg'), [foo])
| jensens/larch | larch/nodes_tests.py | Python | gpl-3.0 | 12,222 |
import pigui.pyqt5.widgets.list.view
import pigui.pyqt5.widgets.miller.view
import about.editor
import about.delegate
DefaultList = pigui.pyqt5.widgets.list.view.DefaultList
def create_delegate(self, index):
typ = self.model.data(index, 'type')
if typ == 'editor':
suffix = self.model.data(index, 'suffix')
default = self.model.data(index, 'default')
value = self.model.data(index, 'value')
Editor = about.editor.get(suffix)
if Editor:
return Editor(default=value or default, index=index)
else:
return about.editor.noeditor.Editor(suffix=suffix, index=index)
elif typ == 'entry':
label = self.model.data(index, 'display')
suffix = self.model.data(index, 'suffix')
delegate = about.delegate.EntryDelegate(label=label, index=index)
delegate.setProperty('suffix', suffix)
return delegate
else:
return super(DefaultList, self).create_delegate(index)
def monkey_patch():
DefaultList.create_delegate = create_delegate
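# Illustrative note, not part of the original file: create_delegate() above
# dispatches on the model's 'type' value ('editor' or 'entry') and falls back
# to the stock DefaultList behaviour, so the patch only needs to be applied
# once, before any DefaultList instances are built, e.g.:
#     import about.view
#     about.view.monkey_patch()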
| abstractfactory/about | about/view.py | Python | gpl-3.0 | 1,063 |
# copied from beets for use in musicdir
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
"""
import mutagen
import mutagen.mp3
import mutagen.oggvorbis
import mutagen.mp4
import mutagen.flac
import mutagen.monkeysaudio
import datetime
import re
import base64
import imghdr
from musicdir.util.enumeration import enum
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
# Exceptions.
# Raised for any file MediaFile can't read.
class UnreadableFileError(IOError):
pass
# Raised for files that don't seem to have a type MediaFile supports.
class FileTypeError(UnreadableFileError):
pass
# Constants.
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'mp4': 'AAC',
'ogg': 'OGG',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
}
# Utility.
def _safe_cast(out_type, val):
"""Tries to covert val to out_type but will never raise an
exception. If the value can't be converted, then a sensible
default value is returned. out_type should be bool, int, or
unicode; otherwise, the value is just passed through.
"""
if out_type == int:
if val is None:
return 0
elif isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, basestring):
val = unicode(val)
# Get a number from the front of the string.
val = re.match('[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
if val is None:
return False
else:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == unicode:
if val is None:
return u''
else:
return unicode(val)
else:
return val
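# Illustrative sketch, not part of the original module: the conversions that
# _safe_cast() above performs, written out as assertions.  Expected values are
# read off the code and docstring rather than taken from a test suite.
def _safe_cast_examples():
    assert _safe_cast(int, '12/4') == 12     # leading digits of a string
    assert _safe_cast(int, None) == 0        # missing value -> falsy default
    assert _safe_cast(bool, '0') is False    # string '0' -> False
    assert _safe_cast(unicode, None) == u''  # missing value -> empty string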
# Flags for encoding field behavior.
# Determine style of packing, if any.
packing = enum('SLASHED', # pair delimited by /
'TUPLE', # a python tuple of 2 items
'DATE', # YYYY-MM-DD
name='packing')
class StorageStyle(object):
"""Parameterizes the storage behavior of a single field for a
certain tag format.
- key: The Mutagen key used to access the field's data.
- list_elem: Store item as a single object or as first element
of a list.
- as_type: Which type the value is stored as (unicode, int,
bool, or str).
- packing: If this value is packed in a multiple-value storage
unit, which type of packing (in the packing enum). Otherwise,
None. (Makes as_type irrelevant).
- pack_pos: If the value is packed, in which position it is
stored.
- ID3 storage only: match against this 'desc' field as well
as the key.
"""
def __init__(self, key, list_elem = True, as_type = unicode,
packing = None, pack_pos = 0, id3_desc = None,
id3_frame_field = u'text'):
self.key = key
self.list_elem = list_elem
self.as_type = as_type
self.packing = packing
self.pack_pos = pack_pos
self.id3_desc = id3_desc
self.id3_frame_field = id3_frame_field
# Dealing with packings.
class Packed(object):
"""Makes a packed list of values subscriptable. To access the packed
output after making changes, use packed_thing.items.
"""
def __init__(self, items, packstyle, none_val=0, out_type=int):
"""Create a Packed object for subscripting the packed values in
items. The items are packed using packstyle, which is a value
from the packing enum. none_val is returned from a request when
    no suitable value is found in the items. Values are converted to
out_type before they are returned.
"""
self.items = items
self.packstyle = packstyle
self.none_val = none_val
self.out_type = out_type
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError('index must be an integer')
if self.items is None:
return self.none_val
items = self.items
if self.packstyle == packing.DATE:
# Remove time information from dates. Usually delimited by
# a "T" or a space.
items = re.sub(r'[Tt ].*$', '', unicode(items))
# transform from a string packing into a list we can index into
if self.packstyle == packing.SLASHED:
seq = unicode(items).split('/')
elif self.packstyle == packing.DATE:
seq = unicode(items).split('-')
elif self.packstyle == packing.TUPLE:
seq = items # tuple: items is already indexable
try:
out = seq[index]
except:
out = None
if out is None or out == self.none_val or out == '':
return _safe_cast(self.out_type, self.none_val)
else:
return _safe_cast(self.out_type, out)
def __setitem__(self, index, value):
if self.packstyle in (packing.SLASHED, packing.TUPLE):
# SLASHED and TUPLE are always two-item packings
length = 2
else:
# DATE can have up to three fields
length = 3
# make a list of the items we'll pack
new_items = []
for i in range(length):
if i == index:
next_item = value
else:
next_item = self[i]
new_items.append(next_item)
if self.packstyle == packing.DATE:
# Truncate the items wherever we reach an invalid (none)
# entry. This prevents dates like 2008-00-05.
for i, item in enumerate(new_items):
if item == self.none_val or item is None:
del(new_items[i:]) # truncate
break
if self.packstyle == packing.SLASHED:
self.items = '/'.join(map(unicode, new_items))
elif self.packstyle == packing.DATE:
field_lengths = [4, 2, 2] # YYYY-MM-DD
elems = []
for i, item in enumerate(new_items):
elems.append( ('%0' + str(field_lengths[i]) + 'i') % item )
self.items = '-'.join(elems)
elif self.packstyle == packing.TUPLE:
self.items = new_items
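# Illustrative sketch, not part of the original module: how Packed wraps a
# slashed "track/total" string.  The behaviour follows directly from the
# __getitem__/__setitem__ methods above; the values are examples only.
def _packed_example():
    p = Packed('3/12', packing.SLASHED, out_type=int)
    track, total = p[0], p[1]  # -> 3, 12
    p[1] = 14                  # repack with a new total
    return track, total, p.items  # -> (3, 12, '3/14')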
# The field itself.
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field. out_type is the type that users of MediaFile should see and
can be unicode, int, or bool. id3, mp4, and flac are StorageStyle
instances parameterizing the field's storage for each type.
"""
def __init__(self, out_type = unicode, **kwargs):
"""Creates a new MediaField.
- out_type: The field's semantic (exterior) type.
- kwargs: A hash whose keys are 'mp3', 'mp4', and 'etc'
and whose values are StorageStyle instances
parameterizing the field's storage for each type.
"""
self.out_type = out_type
if not set(['mp3', 'mp4', 'etc']) == set(kwargs):
raise TypeError('MediaField constructor must have keyword '
'arguments mp3, mp4, and etc')
self.styles = kwargs
def _fetchdata(self, obj, style):
"""Get the value associated with this descriptor's field stored
with the given StorageStyle. Unwraps from a list if necessary.
"""
# fetch the value, which may be a scalar or a list
if obj.type == 'mp3':
if style.id3_desc is not None: # also match on 'desc' field
frames = obj.mgfile.tags.getall(style.key)
entry = None
for frame in frames:
if frame.desc == style.id3_desc:
entry = getattr(frame, style.id3_frame_field)
break
if entry is None: # no desc match
return None
else:
# Get the metadata frame object.
try:
frame = obj.mgfile[style.key]
except KeyError:
return None
entry = getattr(frame, style.id3_frame_field)
else: # Not MP3.
try:
entry = obj.mgfile[style.key]
except KeyError:
return None
# possibly index the list
if style.list_elem:
if entry: # List must have at least one value.
return entry[0]
else:
return None
else:
return entry
def _storedata(self, obj, val, style):
"""Store val for this descriptor's field in the tag dictionary
according to the provided StorageStyle. Store it as a
single-item list if necessary.
"""
# wrap as a list if necessary
if style.list_elem: out = [val]
else: out = val
if obj.type == 'mp3':
# Try to match on "desc" field.
if style.id3_desc is not None:
frames = obj.mgfile.tags.getall(style.key)
# try modifying in place
found = False
for frame in frames:
if frame.desc == style.id3_desc:
setattr(frame, style.id3_frame_field, out)
found = True
break
# need to make a new frame?
if not found:
frame = mutagen.id3.Frames[style.key](
encoding=3,
desc=style.id3_desc,
**{style.id3_frame_field: val}
)
obj.mgfile.tags.add(frame)
# Try to match on "owner" field.
elif style.key.startswith('UFID:'):
owner = style.key.split(':', 1)[1]
frames = obj.mgfile.tags.getall(style.key)
for frame in frames:
# Replace existing frame data.
if frame.owner == owner:
setattr(frame, style.id3_frame_field, val)
else:
# New frame.
frame = mutagen.id3.UFID(owner=owner,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall('UFID', [frame])
# Just replace based on key.
else:
frame = mutagen.id3.Frames[style.key](encoding = 3,
**{style.id3_frame_field: val})
obj.mgfile.tags.setall(style.key, [frame])
else: # Not MP3.
obj.mgfile[style.key] = out
def _styles(self, obj):
if obj.type in ('mp3', 'mp4'):
styles = self.styles[obj.type]
else:
styles = self.styles['etc'] # sane styles
# Make sure we always return a list of styles, even when given
# a single style for convenience.
if isinstance(styles, StorageStyle):
return [styles]
else:
return styles
def __get__(self, obj, owner):
"""Retrieve the value of this metadata field.
"""
# Fetch the data using the various StorageStyles.
styles = self._styles(obj)
for style in styles:
# Use the first style that returns a reasonable value.
out = self._fetchdata(obj, style)
if out:
break
if style.packing:
out = Packed(out, style.packing)[style.pack_pos]
return _safe_cast(self.out_type, out)
def __set__(self, obj, val):
"""Set the value of this metadata field.
"""
# Store using every StorageStyle available.
styles = self._styles(obj)
for style in styles:
if style.packing:
p = Packed(self._fetchdata(obj, style), style.packing)
p[style.pack_pos] = val
out = p.items
else: # unicode, integer, or boolean scalar
out = val
# deal with Nones according to abstract type if present
if out is None:
if self.out_type == int:
out = 0
elif self.out_type == bool:
out = False
elif self.out_type == unicode:
out = u''
# We trust that packed values are handled above.
# convert to correct storage type (irrelevant for
# packed values)
if style.as_type == unicode:
if out is None:
out = u''
else:
if self.out_type == bool:
# store bools as 1,0 instead of True,False
out = unicode(int(out))
else:
out = unicode(out)
elif style.as_type == int:
if out is None:
out = 0
else:
out = int(out)
elif style.as_type in (bool, str):
out = style.as_type(out)
# store the data
self._storedata(obj, out, style)
class CompositeDateField(object):
"""A MediaFile field for conveniently accessing the year, month, and
day fields as a datetime.date object. Allows both getting and
setting of the component fields.
"""
def __init__(self, year_field, month_field, day_field):
"""Create a new date field from the indicated MediaFields for
the component values.
"""
self.year_field = year_field
self.month_field = month_field
self.day_field = day_field
def __get__(self, obj, owner):
"""Return a datetime.date object whose components indicating the
smallest valid date whose components are at least as large as
the three component fields (that is, if year == 1999, month == 0,
and day == 0, then date == datetime.date(1999, 1, 1)). If the
components indicate an invalid date (e.g., if month == 47),
datetime.date.min is returned.
"""
try:
return datetime.date(
max(self.year_field.__get__(obj, owner), datetime.MINYEAR),
max(self.month_field.__get__(obj, owner), 1),
max(self.day_field.__get__(obj, owner), 1)
)
except ValueError: # Out of range values.
return datetime.date.min
def __set__(self, obj, val):
"""Set the year, month, and day fields to match the components of
the provided datetime.date object.
"""
self.year_field.__set__(obj, val.year)
self.month_field.__set__(obj, val.month)
self.day_field.__set__(obj, val.day)
class ImageField(object):
"""A descriptor providing access to a file's embedded album art.
Holds a bytestring reflecting the image data. The image should
either be a JPEG or a PNG for cross-format compatibility. It's
probably a bad idea to use anything but these two formats.
"""
@classmethod
def _mime(cls, data):
"""Return the MIME type (either image/png or image/jpeg) of the
image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return 'image/png'
else:
# Currently just fall back to JPEG.
return 'image/jpeg'
@classmethod
def _mp4kind(cls, data):
"""Return the MPEG-4 image type code of the data. If the image
is not a PNG or JPEG, JPEG is assumed.
"""
kind = imghdr.what(None, h=data)
if kind == 'png':
return mutagen.mp4.MP4Cover.FORMAT_PNG
else:
return mutagen.mp4.MP4Cover.FORMAT_JPEG
def __get__(self, obj, owner):
if obj.type == 'mp3':
# Look for APIC frames.
for frame in obj.mgfile.tags.values():
if frame.FrameID == 'APIC':
picframe = frame
break
else:
# No APIC frame.
return None
return picframe.data
elif obj.type == 'mp4':
if 'covr' in obj.mgfile:
covers = obj.mgfile['covr']
if covers:
cover = covers[0]
# cover is an MP4Cover, which is a subclass of str.
return cover
# No cover found.
return None
else:
# Here we're assuming everything but MP3 and MPEG-4 uses
# the Xiph/Vorbis Comments standard. This may not be valid.
# http://wiki.xiph.org/VorbisComment#Cover_art
if 'metadata_block_picture' not in obj.mgfile:
# Try legacy COVERART tags.
if 'coverart' in obj.mgfile and obj.mgfile['coverart']:
return base64.b64decode(obj.mgfile['coverart'][0])
return None
for data in obj.mgfile["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
break
except TypeError:
pass
else:
return None
return pic.data
def __set__(self, obj, val):
if val is not None:
if not isinstance(val, str):
raise ValueError('value must be a byte string or None')
if obj.type == 'mp3':
# Clear all APIC frames.
obj.mgfile.tags.delall('APIC')
if val is None:
# If we're clearing the image, we're done.
return
picframe = mutagen.id3.APIC(
encoding = 3,
mime = self._mime(val),
type = 3, # front cover
desc = u'',
data = val,
)
obj.mgfile['APIC'] = picframe
elif obj.type == 'mp4':
if val is None:
if 'covr' in obj.mgfile:
del obj.mgfile['covr']
else:
cover = mutagen.mp4.MP4Cover(val, self._mp4kind(val))
obj.mgfile['covr'] = [cover]
else:
# Again, assuming Vorbis Comments standard.
# Strip all art, including legacy COVERART.
            if 'metadata_block_picture' in obj.mgfile:
                del obj.mgfile['metadata_block_picture']
if 'coverart' in obj.mgfile:
del obj.mgfile['coverart']
if 'coverartmime' in obj.mgfile:
del obj.mgfile['coverartmime']
# Add new art if provided.
if val is not None:
pic = mutagen.flac.Picture()
pic.data = val
pic.mime = self._mime(val)
obj.mgfile['metadata_block_picture'] = [
base64.b64encode(pic.write())
]
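# Illustrative sketch, not part of the original module: ImageField is exposed
# below as MediaFile.art, so embedded cover art round-trips as a plain byte
# string.  The file names here are placeholders, not fixtures.
def _example_cover_art(path='song.mp3', image_path='cover.jpg'):
    f = MediaFile(path)
    f.art = open(image_path, 'rb').read()  # embed a JPEG/PNG bytestring
    f.save()
    return f.art                           # reads the same bytes back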
# The file (a collection of fields).
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
def __init__(self, path):
"""Constructs a new MediaFile reflecting the file at path. May
throw UnreadableFileError.
"""
unreadable_exc = (
mutagen.mp3.HeaderNotFoundError,
mutagen.flac.FLACNoHeaderError,
mutagen.monkeysaudio.MonkeysAudioHeaderError,
mutagen.mp4.MP4StreamInfoError,
mutagen.oggvorbis.OggVorbisHeaderError,
)
try:
self.mgfile = mutagen.File(path)
except unreadable_exc:
raise UnreadableFileError('Mutagen could not read file')
except IOError:
raise UnreadableFileError('could not read file')
if self.mgfile is None: # Mutagen couldn't guess the type
raise FileTypeError('file type unsupported by Mutagen')
elif type(self.mgfile).__name__ == 'M4A' or \
type(self.mgfile).__name__ == 'MP4':
self.type = 'mp4'
elif type(self.mgfile).__name__ == 'ID3' or \
type(self.mgfile).__name__ == 'MP3':
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
else:
raise FileTypeError('file type %s unsupported by MediaFile' %
type(self.mgfile).__name__)
# add a set of tags if it's missing
if self.mgfile.tags is None:
self.mgfile.add_tags()
def save(self):
self.mgfile.save()
#### field definitions ####
title = MediaField(
mp3 = StorageStyle('TIT2'),
mp4 = StorageStyle("\xa9nam"),
etc = StorageStyle('title'),
)
artist = MediaField(
mp3 = StorageStyle('TPE1'),
mp4 = StorageStyle("\xa9ART"),
etc = StorageStyle('artist'),
)
album = MediaField(
mp3 = StorageStyle('TALB'),
mp4 = StorageStyle("\xa9alb"),
etc = StorageStyle('album'),
)
genre = MediaField(
mp3 = StorageStyle('TCON'),
mp4 = StorageStyle("\xa9gen"),
etc = StorageStyle('genre'),
)
composer = MediaField(
mp3 = StorageStyle('TCOM'),
mp4 = StorageStyle("\xa9wrt"),
etc = StorageStyle('composer'),
)
grouping = MediaField(
mp3 = StorageStyle('TIT1'),
mp4 = StorageStyle("\xa9grp"),
etc = StorageStyle('grouping'),
)
year = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 0),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 0),
etc = [StorageStyle('date',
packing = packing.DATE,
pack_pos = 0),
StorageStyle('year')]
)
month = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 1),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 1),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 1)
)
day = MediaField(out_type=int,
mp3 = StorageStyle('TDRC',
packing = packing.DATE,
pack_pos = 2),
mp4 = StorageStyle("\xa9day",
packing = packing.DATE,
pack_pos = 2),
etc = StorageStyle('date',
packing = packing.DATE,
pack_pos = 2)
)
date = CompositeDateField(year, month, day)
track = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('track'),
StorageStyle('tracknumber')]
)
tracktotal = MediaField(out_type = int,
mp3 = StorageStyle('TRCK',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('trkn',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('tracktotal'),
StorageStyle('trackc'),
StorageStyle('totaltracks')]
)
disc = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 0),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 0),
etc = [StorageStyle('disc'),
StorageStyle('discnumber')]
)
disctotal = MediaField(out_type = int,
mp3 = StorageStyle('TPOS',
packing = packing.SLASHED,
pack_pos = 1),
mp4 = StorageStyle('disk',
packing = packing.TUPLE,
pack_pos = 1),
etc = [StorageStyle('disctotal'),
StorageStyle('discc'),
StorageStyle('totaldiscs')]
)
lyrics = MediaField(
mp3 = StorageStyle('USLT',
list_elem = False,
id3_desc = u''),
mp4 = StorageStyle("\xa9lyr"),
etc = StorageStyle('lyrics')
)
comments = MediaField(
mp3 = StorageStyle('COMM', id3_desc = u''),
mp4 = StorageStyle("\xa9cmt"),
etc = [StorageStyle('description'),
StorageStyle('comment')]
)
bpm = MediaField(out_type = int,
mp3 = StorageStyle('TBPM'),
mp4 = StorageStyle('tmpo', as_type = int),
etc = StorageStyle('bpm')
)
comp = MediaField(out_type = bool,
mp3 = StorageStyle('TCMP'),
mp4 = StorageStyle('cpil',
list_elem = False,
as_type = bool),
etc = StorageStyle('compilation')
)
albumartist = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'Album Artist'),
mp4 = StorageStyle(
'----:com.apple.iTunes:Album Artist',
as_type=str),
etc = [StorageStyle('album artist'),
StorageStyle('albumartist')]
)
albumtype = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Type'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Type',
as_type=str),
etc = StorageStyle('musicbrainz_albumtype')
)
# Album art.
art = ImageField()
# MusicBrainz IDs.
mb_trackid = MediaField(
mp3 = StorageStyle('UFID:http://musicbrainz.org',
list_elem = False,
id3_frame_field = u'data'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Track Id',
as_type=str),
etc = StorageStyle('musicbrainz_trackid')
)
mb_albumid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Album Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumid')
)
mb_artistid = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'MusicBrainz Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_artistid')
)
mb_albumartistid = MediaField(
mp3 = StorageStyle('TXXX',
id3_desc=u'MusicBrainz Album Artist Id'),
mp4 = StorageStyle(
'----:com.apple.iTunes:MusicBrainz Album Artist Id',
as_type=str),
etc = StorageStyle('musicbrainz_albumartistid')
)
@property
def length(self):
return self.mgfile.info.length
@property
def bitrate(self):
if self.type in ('flac', 'ape'):
if hasattr(self.mgfile.info, 'bits_per_sample'):
# Simulate bitrate for lossless formats.
#fixme: The utility of this guess is questionable.
return self.mgfile.info.sample_rate * \
self.mgfile.info.bits_per_sample
else:
# Old APE file format.
return 0
elif self.type == 'wv':
# Mutagen doesn't provide enough information.
return 0
else:
return self.mgfile.info.bitrate
@property
def format(self):
return TYPES[self.type]
| coolkehon/musicdir | src/musicdir/mediafile.py | Python | gpl-3.0 | 31,537 |
# Copyright (c) 2019 Verizon Media
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ALIAS = 'tag-ports-during-bulk-creation'
IS_SHIM_EXTENSION = True
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Tag Ports During Bulk Creation'
DESCRIPTION = 'Allow to tag ports during bulk creation'
UPDATED_TIMESTAMP = '2019-12-29T19:00:00-00:00'
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| openstack/neutron-lib | neutron_lib/api/definitions/tag_ports_during_bulk_creation.py | Python | apache-2.0 | 1,024 |
# Copyright (c) 2015 James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from matplotlib import use;use('pdf')
from coldeep import ColDeep
from layers import *
import GPy
from matplotlib import pyplot as plt
plt.close('all')
np.random.seed(0)
N = 30
D = 1
X = np.linspace(0,1,N)[:,None]
Y = np.where(X>0.5, 1,0) + np.random.randn(N,1)*0.02
Q = 1
M = 15
Xtest = np.linspace(-1,2,500)[:,None]
def plot(m, prefix):
#for i, l in enumerate(m.layers):
#l.plot()
#plt.savefig('./step_fn_plots/%s_layer%i.pdf'%(prefix,i))
#plt.close()
s = m.predict_sampling(Xtest, 1000)
H, xedges, yedges = np.histogram2d(np.repeat(Xtest.T, 1000, 0).flatten(), s.flatten(), bins=[Xtest.flatten(),np.linspace(-.3,1.3,50)])
plt.figure()
plt.imshow(H.T[:,::-1], extent=[xedges.min(), xedges.max(),yedges.min(),yedges.max()], cmap=plt.cm.Blues, interpolation='nearest')
plt.plot(X, Y, 'kx', mew=1.3)
plt.ylim(-.3, 1.3)
#a GP
m = GPy.models.GPRegression(X,Y)
m.optimize()
print m.log_likelihood()
m.plot()
mu, var = m.predict(Xtest)
s = np.random.randn(mu.size, 1000)*np.sqrt(var) + mu
H, xedges, yedges = np.histogram2d(np.repeat(Xtest.T, 1000, 0).flatten(), s.T.flatten(), bins=[Xtest.flatten(),np.linspace(-.3,1.3,50)])
plt.figure()
plt.imshow(H.T[:,::-1], extent=[xedges.min(), xedges.max(),yedges.min(),yedges.max()], cmap=plt.cm.Blues, interpolation='nearest')
plt.plot(X, Y, 'kx', mew=1.3)
plt.ylim(-.3, 1.3)
#one hidden layer:
layer_X = InputLayerFixed(X, input_dim=1, output_dim=Q, kern=GPy.kern.RBF(1), Z=np.random.rand(M,1), beta=100., name='layerX')
layer_Y = ObservedLayer(Y, input_dim=Q, output_dim=D, kern=GPy.kern.RBF(Q), Z=np.random.randn(M,Q), beta=500., name='layerY')
layer_X.add_layer(layer_Y)
m = ColDeep([layer_X, layer_Y])
layer_X.Z.fix()
m.optimize('bfgs', max_iters=1000, messages=1)
print m.log_likelihood()
plot(m, 'H1')
#two hidden layers
layer_X = InputLayerFixed(X, input_dim=1, output_dim=Q, kern=GPy.kern.RBF(1), Z=np.random.rand(M,1), beta=100., name='layerX')
layer_H = HiddenLayer(input_dim=Q, output_dim=Q, kern=GPy.kern.RBF(Q, ARD=True), Z=np.random.randn(M,Q), beta=100., name='layerH')
layer_Y = ObservedLayer(Y, input_dim=Q, output_dim=D, kern=GPy.kern.RBF(Q), Z=np.random.randn(M,Q), beta=500., name='layerY')
layer_X.add_layer(layer_H)
layer_H.add_layer(layer_Y)
m = ColDeep([layer_X, layer_H, layer_Y])
layer_X.Z.fix()
m.optimize('bfgs', max_iters=1000, messages=1)
print m.log_likelihood()
plot(m, 'H2')
#threee hidden layers
layer_X = InputLayerFixed(X, input_dim=1, output_dim=Q, kern=GPy.kern.RBF(1), Z=np.random.rand(M,1), beta=100., name='layerX')
layer_H = HiddenLayer(input_dim=Q, output_dim=Q, kern=GPy.kern.RBF(Q, ARD=True), Z=np.random.randn(M,Q), beta=100., name='layerH')
layer_H2 = HiddenLayer(input_dim=Q, output_dim=Q, kern=GPy.kern.RBF(Q, ARD=True), Z=np.random.randn(M,Q), beta=100., name='layerH2')
layer_Y = ObservedLayer(Y, input_dim=Q, output_dim=D, kern=GPy.kern.RBF(Q), Z=np.random.randn(M,Q), beta=500., name='layerY')
layer_X.add_layer(layer_H)
layer_H.add_layer(layer_H2)
layer_H2.add_layer(layer_Y)
m = ColDeep([layer_X, layer_H, layer_H2, layer_Y])
layer_X.Z.fix()
m.optimize('bfgs', max_iters=1000, messages=1)
print m.log_likelihood()
plot(m, 'H3')
| SheffieldML/deepGPy | step_fn_demo.py | Python | bsd-3-clause | 3,284 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in image_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ResizeNearestNeighborOpTest(tf.test.TestCase):
TYPES = [np.float32, np.float64]
def testShapeIsCorrectAfterOp(self):
in_shape = [1, 2, 2, 1]
out_shape = [1, 4, 6, 1]
for nptype in self.TYPES:
x = np.arange(0, 4).reshape(in_shape).astype(nptype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_nearest_neighbor(input_tensor,
out_shape[1:3])
self.assertEqual(out_shape, list(resize_out.get_shape()))
resize_out = sess.run(resize_out)
self.assertEqual(out_shape, list(resize_out.shape))
def testGradFromResizeToLargerInBothDims(self):
in_shape = [1, 2, 3, 1]
out_shape = [1, 4, 6, 1]
for nptype in self.TYPES:
x = np.arange(0, 6).reshape(in_shape).astype(nptype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_nearest_neighbor(input_tensor,
out_shape[1:3])
err = tf.test.compute_gradient_error(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
self.assertLess(err, 1e-3)
def testGradFromResizeToSmallerInBothDims(self):
in_shape = [1, 4, 6, 1]
out_shape = [1, 2, 3, 1]
for nptype in self.TYPES:
x = np.arange(0, 24).reshape(in_shape).astype(nptype)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_nearest_neighbor(input_tensor,
out_shape[1:3])
err = tf.test.compute_gradient_error(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
self.assertLess(err, 1e-3)
def testCompareGpuVsCpu(self):
in_shape = [1, 4, 6, 3]
out_shape = [1, 8, 16, 3]
for nptype in self.TYPES:
x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype)
for align_corners in [True, False]:
with self.test_session(use_gpu=False):
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_nearest_neighbor(input_tensor,
out_shape[1:3],
align_corners=align_corners)
grad_cpu = tf.test.compute_gradient(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
with self.test_session(use_gpu=True):
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_nearest_neighbor(input_tensor,
out_shape[1:3],
align_corners=align_corners)
grad_gpu = tf.test.compute_gradient(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5)
class ResizeBilinearOpTest(tf.test.TestCase):
def testShapeIsCorrectAfterOp(self):
in_shape = [1, 2, 2, 1]
out_shape = [1, 4, 6, 1]
x = np.arange(0, 4).reshape(in_shape).astype(np.float32)
with self.test_session() as sess:
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_bilinear(input_tensor,
out_shape[1:3])
self.assertEqual(out_shape, list(resize_out.get_shape()))
resize_out = sess.run(resize_out)
self.assertEqual(out_shape, list(resize_out.shape))
def testGradFromResizeToLargerInBothDims(self):
in_shape = [1, 2, 3, 1]
out_shape = [1, 4, 6, 1]
x = np.arange(0, 6).reshape(in_shape).astype(np.float32)
with self.test_session():
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_bilinear(input_tensor,
out_shape[1:3])
err = tf.test.compute_gradient_error(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
self.assertLess(err, 1e-3)
def testGradFromResizeToSmallerInBothDims(self):
in_shape = [1, 4, 6, 1]
out_shape = [1, 2, 3, 1]
x = np.arange(0, 24).reshape(in_shape).astype(np.float32)
with self.test_session():
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_bilinear(input_tensor,
out_shape[1:3])
err = tf.test.compute_gradient_error(input_tensor,
in_shape,
resize_out,
out_shape,
x_init_value=x)
self.assertLess(err, 1e-3)
def testGradOnUnsupportedType(self):
in_shape = [1, 4, 6, 1]
out_shape = [1, 2, 3, 1]
x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)
with self.test_session():
input_tensor = tf.constant(x, shape=in_shape)
resize_out = tf.image.resize_bilinear(input_tensor, out_shape[1:3])
grad = tf.gradients(input_tensor, [resize_out])
self.assertEqual([None], grad)
if __name__ == "__main__":
tf.test.main()
| ivano666/tensorflow | tensorflow/python/ops/image_grad_test.py | Python | apache-2.0 | 7,200 |
from .meteor_files_uploader import MeteorFilesUploader, MeteorFilesException
| hunternet93/ddp_asyncio | ddp_asyncio/extras/__init__.py | Python | mit | 77 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tasks that should be run via Celery."""
from __future__ import absolute_import
import celery
import models
import taskqueue.celery as taskc
import utils
import utils.batch.common
import utils.bisect.boot as bootb
import utils.bisect.defconfig as defconfigb
import utils.bootimport
import utils.db
import utils.docimport
import utils.emails
import utils.log_parser
import utils.report.boot
import utils.report.build
import utils.report.common
import utils.tests_import as tests_import
@taskc.app.task(name="import-job")
def import_job(json_obj, db_options, mail_options=None):
"""Just a wrapper around the real import function.
This is used to provide a Celery-task access to the import function.
:param json_obj: The JSON object with the values necessary to import the
job.
:type json_obj: dictionary
:param db_options: The database connection parameters.
:type db_options: dictionary
:param mail_options: The options necessary to connect to the SMTP server.
:type mail_options: dictionary
"""
return utils.docimport.import_and_save_job(json_obj, db_options)
@taskc.app.task(name="parse-build-log")
def parse_build_log(job_id, json_obj, db_options, mail_options=None):
"""Wrapper around the real build log parsing function.
    Used to provide a Celery-task access to the log parsing function.
:param job_id: The ID of the job saved in the database. This value gest
injected by Celery when linking the task to the previous one.
:type job_id: string
:param json_obj: The JSON object with the necessary values.
:type json_obj: dictionary
:param db_options: The database connection parameters.
:type db_options: dictionary
:param mail_options: The options necessary to connect to the SMTP server.
:type mail_options: dictionary
"""
return utils.log_parser.parse_build_log(job_id, json_obj, db_options)
@taskc.app.task(name="import-boot")
def import_boot(json_obj, db_options, mail_options=None):
"""Just a wrapper around the real boot import function.
This is used to provide a Celery-task access to the import function.
:param json_obj: The JSON object with the values necessary to import the
boot report.
:type json_obj: dictionary
:param db_options: The database connection parameters.
:type db_options: dictionary
:param mail_options: The options necessary to connect to the SMTP server.
:type mail_options: dictionary
"""
return utils.bootimport.import_and_save_boot(json_obj, db_options)
@taskc.app.task(name="batch-executor", ignore_result=False)
def execute_batch(json_obj, db_options):
"""Run batch operations based on the passed JSON object.
:param json_obj: The JSON object with the operations to perform.
:type json_obj: dictionary
:param db_options: The database connection parameters.
:type db_options: dictionary
:return The result of the batch operations.
"""
return utils.batch.common.execute_batch_operation(json_obj, db_options)
@taskc.app.task(name="boot-bisect", ignore_result=False)
def boot_bisect(doc_id, db_options, fields=None):
"""Run a boot bisect operation on the passed boot document id.
:param doc_id: The boot document ID.
:type doc_id: string
:param db_options: The database connection parameters.
:type db_options: dictionary
:param fields: A `fields` data structure with the fields to return or
exclude. Default to None.
:type fields: list or dictionary
:return The result of the boot bisect operation.
"""
return bootb.execute_boot_bisection(doc_id, db_options, fields=fields)
@taskc.app.task(name="boot-bisect-compare-to", ignore_result=False)
def boot_bisect_compared_to(doc_id, compare_to, db_options, fields=None):
"""Run a boot bisect operation compared to the provided tree name.
:param doc_id: The boot document ID.
:type doc_id: string
:param compare_to: The name of the tree to compare to.
:type compare_to: string
:param db_options: The database connection parameters.
:type db_options: dictionary
:param fields: A `fields` data structure with the fields to return or
exclude. Default to None.
:type fields: list or dictionary
:return The result of the boot bisect operation.
"""
return bootb.execute_boot_bisection_compared_to(
doc_id, compare_to, db_options, fields=fields)
@taskc.app.task(name="defconfig-bisect", ignore_result=False)
def defconfig_bisect(doc_id, db_options, fields=None):
"""Run a defconfig bisect operation on the passed defconfig document id.
:param doc_id: The boot document ID.
:type doc_id: string
:param db_options: The database connection parameters.
:type db_options: dictionary
:param fields: A `fields` data structure with the fields to return or
exclude. Default to None.
:type fields: list or dictionary
:return The result of the boot bisect operation.
"""
return defconfigb.execute_defconfig_bisection(
doc_id, db_options, fields=fields)
@taskc.app.task(name="defconfig-bisect-compared-to", ignore_result=False)
def defconfig_bisect_compared_to(doc_id, compare_to, db_options, fields=None):
"""Run a defconfig bisect operation compared to the provided tree name.
:param doc_id: The defconfig document ID.
:type doc_id: string
:param compare_to: The name of the tree to compare to.
:type compare_to: string
:param db_options: The database connection parameters.
:type db_options: dictionary
:param fields: A `fields` data structure with the fields to return or
exclude. Default to None.
:type fields: list or dictionary
:return The result of the defconfig bisect operation.
"""
return defconfigb.execute_defconfig_bisection_compared_to(
doc_id, compare_to, db_options, fields=fields)
@taskc.app.task(
name="send-boot-report",
acks_late=True,
track_started=True,
ignore_result=False)
def send_boot_report(job,
kernel,
lab_name,
email_format, to_addrs, db_options, mail_options):
"""Create the boot report email and send it.
:param job: The job name.
:type job: string
:param kernel: The kernel name.
:type kernel: string
:param lab_name: The name of the lab.
:type lab_name: string
:param email_format: The email format to send.
:type email_format: list
:param to_addrs: List of recipients.
:type to_addrs: list
:param db_options: The database connection parameters.
:type db_options: dictionary
:param mail_options: The options necessary to connect to the SMTP server.
:type mail_options: dictionary
"""
utils.LOG.info("Preparing boot report email for '%s-%s'", job, kernel)
status = "ERROR"
txt_body, html_body, subject, headers = \
utils.report.boot.create_boot_report(
job,
kernel,
lab_name,
email_format, db_options=db_options, mail_options=mail_options
)
if all([any([txt_body, html_body]), subject]):
utils.LOG.info("Sending boot report email for '%s-%s'", job, kernel)
status, errors = utils.emails.send_email(
to_addrs,
subject, txt_body, html_body, mail_options, headers=headers)
utils.report.common.save_report(
job, kernel, models.BOOT_REPORT, status, errors, db_options)
else:
utils.LOG.error(
"No email body nor subject found for boot report '%s-%s'",
job, kernel)
return status
@taskc.app.task(
name="send-build-report",
acks_late=True,
track_started=True,
ignore_result=False)
def send_build_report(job,
kernel,
email_format, to_addrs, db_options, mail_options):
"""Create the build report email and send it.
:param job: The job name.
:type job: string
:param kernel: The kernel name.
:type kernel: string
:param email_format: The email format to send.
:type email_format: list
:param to_addrs: List of recipients.
:type to_addrs: list
:param db_options: The database connection parameters.
:type db_options: dictionary
:param mail_options: The options necessary to connect to the SMTP server.
:type mail_options: dictionary
"""
utils.LOG.info("Preparing build report email for '%s-%s'", job, kernel)
status = "ERROR"
txt_body, html_body, subject, headers = \
utils.report.build.create_build_report(
job,
kernel,
email_format,
db_options=db_options,
mail_options=mail_options
)
if all([any([txt_body, html_body]), subject]):
utils.LOG.info("Sending build report email for '%s-%s'", job, kernel)
status, errors = utils.emails.send_email(
to_addrs,
subject, txt_body, html_body, mail_options, headers=headers)
utils.report.common.save_report(
job, kernel, models.BOOT_REPORT, status, errors, db_options)
else:
utils.LOG.error(
"No email body nor subject found for build report '%s-%s'",
job, kernel)
return status
@taskc.app.task(
name="complete-test-suite-import",
track_started=True,
ignore_result=False)
def complete_test_suite_import(suite_json, suite_id, db_options, **kwargs):
"""Complete the test suite import.
First update the test suite references, if what is provided is only the
*_id values. Then, import the test sets and test cases provided.
:param suite_json: The JSON object with the test suite.
:type suite_json: dictionary
:param suite_id: The ID of the test suite.
:type suite_id: bson.objectid.ObjectId
:param test_set: The list of test sets to import.
:type test_set: list
:param test_case: The list of test cases to import.
:type test_case: list
:param db_options: The database connection parameters.
:type db_options: dictionary
:return 200 if OK, 500 in case of errors; a dictionary containing the
kwargs passed plus new values take from the update action.
"""
ret_val, update_doc = tests_import.update_test_suite(
suite_json, suite_id, db_options, **kwargs)
    # Update all the kwargs with the ones taken from the test suite update
# process and pass them along to the next task.
kwargs.update(update_doc)
if ret_val != 200:
utils.LOG.error(
"Error updating test suite '%s' (%s)",
kwargs["suite_name"], suite_id)
return ret_val, kwargs
@taskc.app.task(
name="import-sets-from-suite",
track_started=True,
ignore_result=False,
add_to_parent=False)
def import_test_sets_from_test_suite(
prev_results, suite_id, tests_list, db_options, **kwargs):
"""Import the test sets provided in a test suite.
This task is linked from the test suite update one: the first argument is a
list that contains the return values from the previous task. That argument
is injected once the task has been completed.
:param prev_results: Injected value that contain the parent task results.
:type prev_results: list
:param suite_id: The ID of the suite.
:type suite_id: bson.objectid.ObjectId
    :param tests_list: The list of tests to import.
:type tests_list: list
:param db_options: The database connection parameters.
:type db_options: dictionary
:return 200 if OK, 500 in case of errors; a dictionary with errors or an
empty one.
"""
ret_val = 200
errors = {}
prev_val = prev_results[0]
other_args = prev_results[1]
if all([prev_val == 200, suite_id]):
test_ids, errors = tests_import.import_multi_test_sets(
tests_list, suite_id, db_options, **other_args)
if test_ids:
utils.LOG.info(
"Updating test suite '%s' (%s) with test set IDs",
kwargs["suite_name"], str(suite_id))
database = utils.db.get_db_connection(db_options)
ret_val = utils.db.update(
database[models.TEST_SUITE_COLLECTION],
suite_id, {models.TEST_SET_KEY: test_ids})
# TODO: handle errors.
else:
ret_val = 500
else:
utils.LOG.warn(
"Error saving test suite '%s', will not import tests cases",
kwargs["suite_name"])
return ret_val, errors
@taskc.app.task(
name="import-cases-from-suite",
track_started=True,
ignore_result=False,
add_to_parent=False)
def import_test_cases_from_test_suite(
prev_results, suite_id, tests_list, db_options, **kwargs):
"""Import the test cases provided in a test suite.
    This task is linked from the test suite update one: the first argument is a
    list that contains the return values from the previous task. That argument
    is injected once the previous task has completed.
    :param prev_results: Injected value that contains the parent task results.
:type prev_results: list
:param suite_id: The ID of the suite.
:type suite_id: bson.objectid.ObjectId
    :param tests_list: The list of test cases to import.
:type tests_list: list
:param db_options: The database connection parameters.
:type db_options: dictionary
    :return: 200 if OK, 500 in case of errors; a dictionary with errors or an
    empty one.
"""
ret_val = 200
errors = {}
prev_val = prev_results[0]
other_args = prev_results[1]
if all([prev_val == 200, suite_id]):
test_ids, errors = tests_import.import_multi_test_cases(
tests_list, suite_id, db_options, **other_args)
if test_ids:
utils.LOG.info(
"Updating test suite '%s' (%s) with test case IDs",
kwargs["suite_name"], str(suite_id))
database = utils.db.get_db_connection(db_options)
ret_val = utils.db.update(
database[models.TEST_SUITE_COLLECTION],
suite_id, {models.TEST_CASE_KEY: test_ids})
# TODO: handle errors.
else:
ret_val = 500
else:
utils.LOG.warn(
"Error saving test suite '%s', will not import tests cases",
kwargs["suite_name"])
return ret_val, errors
@taskc.app.task(
name="import-test-cases-from-set", track_started=True, ignore_result=False)
def import_test_cases_from_test_set(
tests_list, suite_id, set_id, db_options, **kwargs):
"""Wrapper around the real import function.
Import the test cases included in a test set.
:param tests_list: The list of test cases to import.
:type tests_list: list
:param suite_id: The ID of the test suite.
:type suite_id: bson.objectid.ObjectId
:param set_id: The ID of the test set.
:type set_id: bson.objectid.ObjectId
:param db_options: The database connection parameters.
:type db_options: dictionary
    :return: 200 if OK, 500 in case of errors; a dictionary with errors or an
    empty one.
"""
return tests_import.import_test_cases_from_test_set(
set_id, suite_id, tests_list, db_options, **kwargs)
def run_batch_group(batch_op_list, db_options):
"""Execute a list of batch operations.
    :param batch_op_list: List of JSON objects used to build the batch
    operations.
    :type batch_op_list: list
    :param db_options: The database connection parameters.
    :type db_options: dictionary
    :return: The list of results, one for each batch operation.
    """
job = celery.group(
[
execute_batch.s(batch_op, db_options)
for batch_op in batch_op_list
]
)
result = job.apply_async()
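    # Busy-wait until every task in the group has finished; result.get() then
    # returns the per-task results in the same order as batch_op_list.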
while not result.ready():
pass
return result.get()
| joyxu/kernelci-backend | app/taskqueue/tasks.py | Python | agpl-3.0 | 16,419 |
#!/usr/bin/env python
import click
import pagoda
import pagoda.viewer
import numpy as np
import numpy.random as rng
class World(pagoda.physics.World):
def reset(self):
for b in self.bodies:
b.position = np.array([0, 0, 10]) + 3 * rng.randn(3)
b.quaternion = pagoda.physics.make_quaternion(
np.pi * rng.rand(), 0, 1, 1)
@click.command()
@click.option('-n', default=20, help='number of bodies in the simulation')
def main(n=20):
w = World()
# set the cfm parameter below for a trampoline-like floor !
# w.cfm = 1e-3
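    # Helper: draw body dimensions from a gamma distribution, clipped so no
    # shape ends up degenerately small (< 0.5) or absurdly large (> 1000).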
def g(n, k=0.1, size=1):
return np.clip(rng.gamma(n, k, size=size), 0.5, 1000)
for _ in range(n):
s, kw = sorted(dict(
box=dict(lengths=g(8, size=3)),
capsule=dict(radius=g(3), length=g(10)),
cylinder=dict(radius=g(2), length=g(10)),
sphere=dict(radius=g(2)),
).items())[rng.randint(4)]
b = w.create_body(s, **kw)
b.color = tuple(rng.uniform(0, 1, size=3)) + (0.9, )
w.reset()
pagoda.viewer.Viewer(w).run()
if __name__ == '__main__':
main()
| EmbodiedCognition/pagoda | examples/dropping-shapes.py | Python | mit | 1,141 |
import re
class RegexPattern(object):
def __init__(self, regex, name=''):
self.regex = re.compile(regex, re.UNICODE)
self.name = name
def resolve(self, string):
match = self.regex.search(string)
if match:
return self.name, match.groupdict()
class patterns(object):
def __init__(self, unknown, *args):
self.patterns = []
self.unknown = unknown
for pattern in args:
if pattern.__class__ == str:
self.patterns.append(RegexPattern(pattern))
else:
self.patterns.append(RegexPattern(*pattern))
def resolve(self, name):
for pattern in self.patterns:
match = pattern.resolve(name)
if match:
return match
return self.unknown
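# Illustrative usage sketch (not part of the original module); the pattern
# strings and names below are hypothetical:
#
#     router = patterns(
#         ('unknown', {}),
#         (r'^/admin/', 'admin'),
#         (r'^/api/(?P<version>\d+)/', 'api'),
#     )
#     name, kwargs = router.resolve('/api/2/users/')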
| Mercy-Nekesa/sokoapp | sokoapp/request/router.py | Python | mit | 819 |
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.CellPanel import CellPanel
from pyjamas.ui import HasHorizontalAlignment
from pyjamas.ui import HasVerticalAlignment
class VerticalPanel(CellPanel):
def __init__(self, **kwargs):
if not kwargs.has_key('Spacing'): kwargs['Spacing'] = 0
if not kwargs.has_key('Padding'): kwargs['Padding'] = 0
self.horzAlign = HasHorizontalAlignment.ALIGN_LEFT
self.vertAlign = HasVerticalAlignment.ALIGN_TOP
CellPanel.__init__(self, **kwargs)
def add(self, widget):
self.insert(widget, self.getWidgetCount())
def getHorizontalAlignment(self):
return self.horzAlign
def getVerticalAlignment(self):
return self.vertAlign
def getWidget(self, index):
return self.children[index]
def getWidgetCount(self):
return len(self.children)
def getWidgetIndex(self, child):
return self.children.index(child)
def setWidget(self, index, widget):
"""Replace the widget at the given index with a new one"""
existing = self.getWidget(index)
if existing:
self.remove(existing)
self.insert(widget, index)
def insert(self, widget, beforeIndex):
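        # Each child gets its own table row: create a <tr> holding a single
        # <td>, splice it into the panel's table body at beforeIndex, then let
        # CellPanel finish the logical insertion and apply the alignments.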
widget.removeFromParent()
tr = DOM.createTR()
td = DOM.createTD()
DOM.insertChild(self.getBody(), tr, beforeIndex)
DOM.appendChild(tr, td)
CellPanel.insert(self, widget, td, beforeIndex)
self.setCellHorizontalAlignment(widget, self.horzAlign)
self.setCellVerticalAlignment(widget, self.vertAlign)
def remove(self, widget):
if isinstance(widget, int):
widget = self.getWidget(widget)
if widget.getParent() != self:
return False
td = DOM.getParent(widget.getElement())
tr = DOM.getParent(td)
DOM.removeChild(self.getBody(), tr)
CellPanel.remove(self, widget)
return True
def setHorizontalAlignment(self, align):
self.horzAlign = align
def setVerticalAlignment(self, align):
self.vertAlign = align
| lovelysystems/pyjamas | library/pyjamas/ui/VerticalPanel.py | Python | apache-2.0 | 2,699 |
from __future__ import unicode_literals
import logging
from django.contrib.auth.models import User
from djblets.extensions.models import RegisteredExtension
from djblets.extensions.resources import ExtensionResource
from djblets.webapi.resources import register_resource_for_model
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.extensions.base import get_extension_manager
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.reviews.models import (Comment, DefaultReviewer,
Group, ReviewRequest,
ReviewRequestDraft, Review,
ScreenshotComment, Screenshot,
FileAttachmentComment)
from reviewboard.scmtools.models import Repository
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.models import WebAPIToken
class Resources(object):
"""Manages the instances for all API resources.
This handles dynamically loading API resource instances upon request,
and registering those resources with models.
When accessing a resource through this class for the first time, it will
be imported from the proper file and cached. Subsequent requests will be
returned from the cache.
"""
def __init__(self):
self.extension = ExtensionResource(get_extension_manager())
self._loaded = False
def __getattr__(self, name):
"""Returns a resource instance as an attribute.
If the resource hasn't yet been loaded into cache, it will be
imported, fetched from the module, and cached. Subsequent attribute
fetches for this resource will be returned from the cache.
"""
if not self._loaded:
self._loaded = True
self._register_resources()
if name not in self.__dict__:
instance_name = '%s_resource' % name
try:
mod = __import__('reviewboard.webapi.resources.%s' % name,
{}, {}, [instance_name])
self.__dict__[name] = getattr(mod, instance_name)
except (ImportError, AttributeError) as e:
logging.error('Unable to load webapi resource %s: %s'
% (name, e))
raise AttributeError('%s is not a valid resource name' % name)
return self.__dict__[name]
def _register_resources(self):
"""Registers all the resource model associations."""
register_resource_for_model(ChangeDescription, self.change)
register_resource_for_model(
Comment,
lambda obj: (obj.review.get().is_reply() and
self.review_reply_diff_comment or
self.review_diff_comment))
register_resource_for_model(DefaultReviewer, self.default_reviewer)
register_resource_for_model(
DiffSet,
lambda obj: obj.history_id and self.diff or self.draft_diff)
register_resource_for_model(
FileDiff,
lambda obj: (obj.diffset.history_id and
self.filediff or
self.draft_filediff))
register_resource_for_model(Group, self.review_group)
register_resource_for_model(RegisteredExtension, self.extension)
register_resource_for_model(HostingServiceAccount,
self.hosting_service_account)
register_resource_for_model(Repository, self.repository)
register_resource_for_model(
Review,
lambda obj: obj.is_reply() and self.review_reply or self.review)
register_resource_for_model(ReviewRequest, self.review_request)
register_resource_for_model(ReviewRequestDraft,
self.review_request_draft)
register_resource_for_model(Screenshot, self.screenshot)
register_resource_for_model(FileAttachment, self.file_attachment)
register_resource_for_model(
FileAttachment,
lambda obj: (obj.is_from_diff and
self.diff_file_attachment or
self.file_attachment))
register_resource_for_model(
ScreenshotComment,
lambda obj: (obj.review.get().is_reply() and
self.review_reply_screenshot_comment or
self.review_screenshot_comment))
register_resource_for_model(
FileAttachmentComment,
lambda obj: (obj.review.get().is_reply() and
self.review_reply_file_attachment_comment or
self.review_file_attachment_comment))
register_resource_for_model(User, self.user)
register_resource_for_model(WebAPIToken, self.api_token)
resources = Resources()
__all__ = ['Resources', 'resources', 'WebAPIResource']
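# Illustrative sketch of the lazy loading implemented in Resources.__getattr__:
# the first attribute access imports the matching submodule, grabs its
# "<name>_resource" instance and caches it, so later accesses are cheap.
#
#     from reviewboard.webapi.resources import resources
#     review_request_resource = resources.review_request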
| custode/reviewboard | reviewboard/webapi/resources/__init__.py | Python | mit | 5,104 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Tests data."""
| inveniosoftware/invenio-oaiserver | tests/data/v5/__init__.py | Python | mit | 253 |
"""Testing module for the kombu.transport.SQS package.
NOTE: The SQSQueueMock and SQSConnectionMock classes originally come from
http://github.com/pcsforeducation/sqs-mock-python. They have been patched
slightly.
"""
from __future__ import absolute_import
from kombu import five
from kombu import messaging
from kombu import Connection, Exchange, Queue
from kombu.tests.case import Case, case_requires
from kombu.transport import SQS
class SQSQueueMock(object):
def __init__(self, name):
self.name = name
self.messages = []
self._get_message_calls = 0
def clear(self, page_size=10, vtimeout=10):
empty, self.messages[:] = not self.messages, []
return not empty
def count(self, page_size=10, vtimeout=10):
return len(self.messages)
count_slow = count
def delete(self):
self.messages[:] = []
return True
def delete_message(self, message):
try:
self.messages.remove(message)
except ValueError:
return False
return True
def get_messages(self, num_messages=1, visibility_timeout=None,
attributes=None, *args, **kwargs):
self._get_message_calls += 1
return self.messages[:num_messages]
def read(self, visibility_timeout=None):
return self.messages.pop(0)
def write(self, message):
self.messages.append(message)
return True
class SQSConnectionMock(object):
def __init__(self):
self.queues = {}
def get_queue(self, queue):
return self.queues.get(queue)
def get_all_queues(self, prefix=""):
return self.queues.values()
def delete_queue(self, queue, force_deletion=False):
q = self.get_queue(queue)
if q:
if q.count():
return False
q.clear()
self.queues.pop(queue, None)
def delete_message(self, queue, message):
return queue.delete_message(message)
def create_queue(self, name, *args, **kwargs):
q = self.queues[name] = SQSQueueMock(name)
return q
@case_requires('boto')
class test_Channel(Case):
def handleMessageCallback(self, message):
self.callback_message = message
def setup(self):
"""Mock the back-end SQS classes"""
# Sanity check... if SQS is None, then it did not import and we
# cannot execute our tests.
SQS.Channel._queue_cache.clear()
# Common variables used in the unit tests
self.queue_name = 'unittest'
# Mock the sqs() method that returns an SQSConnection object and
# instead return an SQSConnectionMock() object.
self.sqs_conn_mock = SQSConnectionMock()
def mock_sqs():
return self.sqs_conn_mock
SQS.Channel.sqs = mock_sqs()
# Set up a task exchange for passing tasks through the queue
self.exchange = Exchange('test_SQS', type='direct')
self.queue = Queue(self.queue_name, self.exchange, self.queue_name)
# Mock up a test SQS Queue with the SQSQueueMock class (and always
# make sure its a clean empty queue)
self.sqs_queue_mock = SQSQueueMock(self.queue_name)
# Now, create our Connection object with the SQS Transport and store
# the connection/channel objects as references for use in these tests.
self.connection = Connection(transport=SQS.Transport)
self.channel = self.connection.channel()
self.queue(self.channel).declare()
self.producer = messaging.Producer(self.channel,
self.exchange,
routing_key=self.queue_name)
# Lastly, make sure that we're set up to 'consume' this queue.
self.channel.basic_consume(self.queue_name,
no_ack=False,
callback=self.handleMessageCallback,
consumer_tag='unittest')
def test_init(self):
"""kombu.SQS.Channel instantiates correctly with mocked queues"""
self.assertIn(self.queue_name, self.channel._queue_cache)
def test_new_queue(self):
queue_name = 'new_unittest_queue'
self.channel._new_queue(queue_name)
self.assertIn(queue_name, self.sqs_conn_mock.queues)
# For cleanup purposes, delete the queue and the queue file
self.channel._delete(queue_name)
def test_delete(self):
queue_name = 'new_unittest_queue'
self.channel._new_queue(queue_name)
self.channel._delete(queue_name)
self.assertNotIn(queue_name, self.channel._queue_cache)
def test_get_from_sqs(self):
# Test getting a single message
message = 'my test message'
self.producer.publish(message)
q = self.channel._new_queue(self.queue_name)
results = q.get_messages()
self.assertEquals(len(results), 1)
# Now test getting many messages
for i in range(3):
message = 'message: {0}'.format(i)
self.producer.publish(message)
results = q.get_messages(num_messages=3)
self.assertEquals(len(results), 3)
def test_get_with_empty_list(self):
with self.assertRaises(five.Empty):
self.channel._get(self.queue_name)
def test_get_bulk_raises_empty(self):
with self.assertRaises(five.Empty):
self.channel._get_bulk(self.queue_name)
def test_messages_to_python(self):
message_count = 3
# Create several test messages and publish them
for i in range(message_count):
message = 'message: %s' % i
self.producer.publish(message)
q = self.channel._new_queue(self.queue_name)
# Get the messages now
messages = q.get_messages(num_messages=message_count)
# Now convert them to payloads
payloads = self.channel._messages_to_python(
messages, self.queue_name,
)
# We got the same number of payloads back, right?
self.assertEquals(len(payloads), message_count)
# Make sure they're payload-style objects
for p in payloads:
self.assertTrue('properties' in p)
def test_put_and_get(self):
message = 'my test message'
self.producer.publish(message)
results = self.queue(self.channel).get().payload
self.assertEquals(message, results)
def test_put_and_get_bulk(self):
# With QoS.prefetch_count = 0
message = 'my test message'
self.producer.publish(message)
results = self.channel._get_bulk(self.queue_name)
self.assertEquals(1, len(results))
def test_puts_and_get_bulk(self):
# Generate 8 messages
message_count = 8
# Set the prefetch_count to 5
self.channel.qos.prefetch_count = 5
# Now, generate all the messages
for i in range(message_count):
message = 'message: %s' % i
self.producer.publish(message)
# Count how many messages are retrieved the first time. Should
# be 5 (message_count).
results = self.channel._get_bulk(self.queue_name)
self.assertEquals(5, len(results))
for i, r in enumerate(results):
self.channel.qos.append(r, i)
# Now, do the get again, the number of messages returned should be 1.
results = self.channel._get_bulk(self.queue_name)
self.assertEquals(len(results), 1)
def test_drain_events_with_empty_list(self):
def mock_can_consume():
return False
self.channel.qos.can_consume = mock_can_consume
with self.assertRaises(five.Empty):
self.channel.drain_events()
def test_drain_events_with_prefetch_5(self):
# Generate 20 messages
message_count = 20
expected_get_message_count = 4
# Set the prefetch_count to 5
self.channel.qos.prefetch_count = 5
# Now, generate all the messages
for i in range(message_count):
self.producer.publish('message: %s' % i)
# Now drain all the events
for i in range(message_count):
self.channel.drain_events()
# How many times was the SQSConnectionMock get_message method called?
self.assertEquals(
expected_get_message_count,
self.channel._queue_cache[self.queue_name]._get_message_calls)
def test_drain_events_with_prefetch_none(self):
# Generate 20 messages
message_count = 20
expected_get_message_count = 2
# Set the prefetch_count to None
self.channel.qos.prefetch_count = None
# Now, generate all the messages
for i in range(message_count):
self.producer.publish('message: %s' % i)
# Now drain all the events
for i in range(message_count):
self.channel.drain_events()
# How many times was the SQSConnectionMock get_message method called?
self.assertEquals(
expected_get_message_count,
self.channel._queue_cache[self.queue_name]._get_message_calls)
| rhcarvalho/kombu | kombu/tests/transport/test_SQS.py | Python | bsd-3-clause | 9,195 |
import click
cluster_name_option = click.option("-n", "--cluster-name", required=True, help="Name of the cluster")
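# Illustrative usage sketch (the command name below is hypothetical): the
# option object above is meant to be applied as a decorator on a click command.
#
#     @click.command()
#     @cluster_name_option
#     def destroy(cluster_name):
#         ...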
| sigopt/sigopt-python | sigopt/cli/arguments/cluster_name.py | Python | mit | 117 |
from cbc.beat import *
class BoundaryStimulus(Expression):
def eval(self, values, x):
t = self.t
if x[0] == 0.0 and x[1] == 0.0 and t > 0.01 and t < 0.1:
values[0] = 10.0
else:
values[0] = 0.0
class FitzHughNagumo(CardiacCellModel):
def __init__(self, epsilon, gamma, alpha):
self.epsilon = epsilon
self.gamma = gamma
self.alpha = alpha
def F(self, v, s):
return self.epsilon*(v - self.gamma*s)
def I(self, v, s):
return v*(v - self.alpha)*(1 - v) - s
class MyFirstHeart(Heart):
def mesh(self):
m = Mesh("heart.xml.gz")
return refine(m)
def end_time(self):
return 1.0
def boundary_current(self):
return BoundaryStimulus()
def conductivities(self):
g = Expression("0.1/(sqrt(pow(x[0] - 0.15, 2) + pow(x[1], 2)) + 0.1)")
M_i = lambda v: g*v
M_ie = lambda v: 1.5*g*v
return (M_i, M_ie)
# Define cell model
cell = FitzHughNagumo(epsilon=0.01, gamma=0.5, alpha=0.1)
# Define heart
heart = MyFirstHeart(cell)
# Define solver (with time-step)
simulate = FullyImplicit(dt=0.01)
# Simulate heart
simulate(heart)
| Juanlu001/CBC.Solve | demo/beat/myfirstheart.py | Python | gpl-3.0 | 1,201 |
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestCase(base.BaseOVSLinuxTestCase):
# TODO(twilson) So far, only ovsdb-related tests are written. It would be
# good to also add the openflow-related functions
def setUp(self):
super(OVSBridgeTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
def create_ovs_port(self, *interface_attrs):
# Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
attrs = collections.OrderedDict(interface_attrs)
attrs.setdefault('type', 'internal')
port_name = net_helpers.get_rand_port_name()
return (port_name, self.br.add_port(port_name, *attrs.items()))
def create_ovs_vif_port(self, iface_id=None, mac=None,
iface_field='iface-id'):
if iface_id is None:
iface_id = base.get_rand_name()
if mac is None:
mac = base.get_rand_name()
attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
port_name, ofport = self.create_ovs_port(attrs)
return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
def test_port_lifecycle(self):
(port_name, ofport) = self.create_ovs_port(('type', 'internal'))
# ofport should always be an integer string with value -1 or > 0.
self.assertTrue(int(ofport))
self.assertTrue(int(self.br.get_port_ofport(port_name)))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual(self.br.br_name,
self.br.get_bridge_for_iface(port_name))
self.br.delete_port(port_name)
self.assertFalse(self.br.port_exists(port_name))
def test_duplicate_port_may_exist_false(self):
port_name, ofport = self.create_ovs_port(('type', 'internal'))
cmd = self.br.ovsdb.add_port(self.br.br_name,
port_name, may_exist=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_delete_port_if_exists_false(self):
cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_replace_port(self):
port_name = net_helpers.get_rand_port_name()
self.br.replace_port(port_name, ('type', 'internal'))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('internal',
self.br.db_get_val('Interface', port_name, 'type'))
self.br.replace_port(port_name, ('type', 'internal'),
('external_ids', {'test': 'test'}))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('test', self.br.db_get_val('Interface', port_name,
'external_ids')['test'])
def test_attribute_lifecycle(self):
(port_name, ofport) = self.create_ovs_port()
tag = 42
self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
self.ovs.clear_db_attribute('Port', port_name, 'tag')
self.assertEqual(self.ovs.db_get_val('Port', port_name, 'tag'), [])
self.assertEqual(self.br.get_port_tag_dict()[port_name], [])
def test_get_bridge_external_bridge_id(self):
self.ovs.set_db_attribute('Bridge', self.br.br_name,
'external_ids',
{'bridge-id': self.br.br_name})
self.assertEqual(
self.br.br_name,
self.ovs.get_bridge_external_bridge_id(self.br.br_name))
def test_controller_lifecycle(self):
controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
self.br.set_controller(controllers)
self.assertSetEqual(controllers, set(self.br.get_controller()))
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def test_set_fail_mode(self):
self.br.set_secure_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
def _assert_br_fail_mode(self, fail_mode):
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
fail_mode)
def test_set_protocols(self):
self.br.set_protocols('OpenFlow10')
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
"OpenFlow10")
def test_get_datapath_id(self):
brdev = ip_lib.IPDevice(self.br.br_name)
dpid = brdev.link.attributes['link/ether'].replace(':', '')
self.br.set_db_attribute('Bridge',
self.br.br_name, 'datapath_id', dpid)
self.assertIn(dpid, self.br.get_datapath_id())
def test_add_tunnel_port(self):
attrs = {
'remote_ip': '192.0.2.1', # RFC 5737 TEST-NET-1
'local_ip': '198.51.100.1', # RFC 5737 TEST-NET-2
}
port_name = net_helpers.get_rand_port_name()
self.br.add_tunnel_port(port_name, attrs['remote_ip'],
attrs['local_ip'])
self.assertEqual(self.ovs.db_get_val('Interface', port_name, 'type'),
'gre')
options = self.ovs.db_get_val('Interface', port_name, 'options')
for attr, val in attrs.items():
self.assertEqual(val, options[attr])
def test_add_patch_port(self):
local = net_helpers.get_rand_port_name()
peer = 'remotepeer'
self.br.add_patch_port(local, peer)
self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
'patch')
options = self.ovs.db_get_val('Interface', local, 'options')
self.assertEqual(peer, options['peer'])
def test_get_port_name_list(self):
# Note that ovs-vsctl's list-ports does not include the port created
# with the same name as the bridge
ports = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ports, set(self.br.get_port_name_list()))
def test_get_port_stats(self):
# Nothing seems to use this function?
(port_name, ofport) = self.create_ovs_port()
stats = set(self.br.get_port_stats(port_name).keys())
self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
def test_get_vif_ports(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_port_set(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(2)]
ports = self.br.get_vif_port_set()
expected = set([x.vif_id for x in vif_ports])
self.assertEqual(expected, ports)
def test_get_port_tag_dict(self):
# Simple case tested in port test_set_get_clear_db_val
pass
def test_get_vif_port_by_id(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
for vif in vif_ports:
self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
vif.vif_id)
def test_delete_ports(self):
# TODO(twilson) I intensely dislike the current delete_ports function
# as the default behavior is really delete_vif_ports(), then it acts
# more like a delete_ports() seems like it should if all_ports=True is
# passed
# Create 2 non-vif ports and 2 vif ports
nonvifs = {self.create_ovs_port()[0] for i in range(2)}
vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
self.assertSetEqual(nonvifs.union(vifs),
set(self.br.get_port_name_list()))
self.br.delete_ports()
self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
self.br.delete_ports(all_ports=True)
self.assertEqual(len(self.br.get_port_name_list()), 0)
def test_reset_bridge(self):
self.create_ovs_port()
self.br.reset_bridge()
self.assertEqual(len(self.br.get_port_name_list()), 0)
self._assert_br_fail_mode([])
def test_reset_bridge_secure_mode(self):
self.br.reset_bridge(secure_mode=True)
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
def setUp(self):
super(OVSLibTestCase, self).setUp()
self.ovs = ovs_lib.BaseOVS()
def test_bridge_lifecycle_baseovs(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
self.addCleanup(self.ovs.delete_bridge, name)
br = self.ovs.add_bridge(name)
self.assertEqual(br.br_name, name)
self.assertTrue(self.ovs.bridge_exists(name))
self.ovs.delete_bridge(name)
self.assertFalse(self.ovs.bridge_exists(name))
def test_get_bridges(self):
bridges = {
self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
for i in range(5)}
self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
def test_bridge_lifecycle_ovsbridge(self):
name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
br = ovs_lib.OVSBridge(name)
self.assertEqual(br.br_name, name)
# Make sure that instantiating an OVSBridge does not actually create
self.assertFalse(self.ovs.bridge_exists(name))
self.addCleanup(self.ovs.delete_bridge, name)
br.create()
self.assertTrue(self.ovs.bridge_exists(name))
br.destroy()
self.assertFalse(self.ovs.bridge_exists(name))
| waltBB/neutron_read | neutron/tests/functional/agent/test_ovs_lib.py | Python | apache-2.0 | 10,944 |
# -*- coding: utf-8 -*-
# © Václav Šmilauer <[email protected]>
#
# Test case for sphere-facet interaction.
#O.bodyContainer="BodyVector"
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()],nBins=5,sweepLength=5e-3),
#SpatialQuickSortCollider(),
InteractionLoop(
[Ig2_Facet_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()],
),
NewtonIntegrator(damping=0.01,gravity=[0,0,-10]),
]
O.bodies.append([
facet([[-1,-1,0],[1,-1,0],[0,1,0]],fixed=True,color=[1,0,0]),
facet([[1,-1,0],[0,1,0,],[1,.5,.5]],fixed=True)
])
#Gl1_Facet(normals=True)
import random,sys
def addRandomSphere():
return O.bodies.append(sphere([random.gauss(0,1),random.gauss(0,1),random.uniform(1,2)],random.uniform(.02,.05)))
O.bodies[len(O.bodies)-1].state.vel=[random.gauss(0,.1),random.gauss(0,.1),random.gauss(0,.1)]
for i in range(0,100): addRandomSphere()
O.dt=1e-4
#O.run()
O.saveTmp('init')
from yade import qt
qt.Controller()
qt.View()
if 1:
for i in range(0,1000):
O.run(50,True);
if random.choice([True,False]):
idOld=random.randint(2,len(O.bodies)-1)
O.bodies.erase(idOld)
print "-%d"%idOld,
else:
idNew=addRandomSphere()
print "+%d"%idNew,
sys.stdout.flush()
| ThomasSweijen/yadesolute2 | examples/test/remove-body.py | Python | gpl-2.0 | 1,268 |
''' Copyright 2015 Neokami GmbH. '''
import requests
class NeokamiHttpClient():
def get(self, route, payload):
r = requests.get(route, params=payload)
return r
def post(self, route, api_key, payload):
headers = {'apikey': api_key }
r = requests.post(route, data=payload, headers=headers)
return r
def postBinary(self, route, bytestream, api_key, params={}):
'''
        :param route: The URL of the API endpoint to POST to.
        :param bytestream: The binary payload, uploaded as the 'data' file.
        :param api_key: The API key, sent in the 'apikey' request header.
        :param params: Optional dictionary of extra form fields.
        :return: The requests.Response object.
'''
headers = {'apikey': api_key }
files = {'data':bytestream}
r = requests.post(
url=route,
data=params,
files=files,
headers=headers)
return r
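# Illustrative usage sketch (endpoint, key and file name are hypothetical):
#
#     client = NeokamiHttpClient()
#     with open('image.jpg', 'rb') as image:
#         response = client.postBinary('https://api.example.com/v1/analyse',
#                                      image.read(), 'MY_API_KEY',
#                                      params={'wait': 1})
#     print(response.status_code)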
| NeokamiCode/Python-SDK | src/Neokami/HttpClients/NeokamiCurl.py | Python | apache-2.0 | 812 |
#!/usr/bin/env python
from __future__ import print_function
import logging
from utils import argparser_factory, db_factory, plot_gender_counts_pf, get_gender_counts_for_year
log = logging.getLogger()
p = argparser_factory()
p.add_argument('-w', '--window', default=5, type=int)
args = p.parse_args()
db = db_factory(args.db)
c = db.cursor()
counts = get_gender_counts_for_year(c)
plot_gender_counts_pf(counts, 'gender-counts',
window=args.window,
height=args.height, width=args.width,
font_size=args.font_size)
| wejradford/castminer | stats/plot_all_gender_roles.py | Python | mit | 583 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-20 23:19
from __future__ import unicode_literals
import cms.common.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0004_category_image_size'),
]
operations = [
migrations.RemoveField(
model_name='categoriesmodule',
name='categories_names_headers_level',
),
migrations.RemoveField(
model_name='categoriesmodule',
name='show_categories_descriptions',
),
migrations.RemoveField(
model_name='categoriesmodule',
name='show_categories_names',
),
migrations.AddField(
model_name='categoriesmodule',
name='images_size',
field=cms.common.fields.FilebrowserVersionField(choices=[('1col', '1 column (65x37)'), ('2cols', '2 columns (160x90)'), ('3cols', '3 columns (255x143)'), ('4cols', '4 columns (350x197)'), ('5cols', '5 columns (445x250)'), ('6cols', '6 columns (540x304)'), ('7cols', '7 columns (635x357)'), ('8cols', '8 columns (730x411)'), ('9cols', '9 columns (825x464)'), ('10cols', '10 columns (920x518)'), ('11cols', '11 columns (1015x571)'), ('12cols', '12 columns (1110x624)'), ('fullhd', 'Full HD (1920x1080)')], default='3cols', max_length=255, verbose_name='images size'),
),
migrations.AddField(
model_name='categoriesmodule',
name='names_headers_level',
field=models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6')], default='3', max_length=1, verbose_name='names headers level'),
),
migrations.AddField(
model_name='categoriesmodule',
name='show_descriptions',
field=models.BooleanField(default=True, verbose_name='show descriptions'),
),
migrations.AddField(
model_name='categoriesmodule',
name='show_images',
field=models.BooleanField(default=True, verbose_name='show images'),
),
migrations.AddField(
model_name='categoriesmodule',
name='show_names',
field=models.BooleanField(default=True, verbose_name='show names'),
),
]
| HurtowniaPixeli/pixelcms-server | cms/content/migrations/0005_auto_20161121_0019.py | Python | mit | 2,319 |
from django.test.utils import override_settings
from mock import patch
from course_groups.models import CourseUserGroup
from xmodule.modulestore.tests.django_utils import TEST_DATA_MOCK_MODULESTORE
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class CohortedContentTestCase(ModuleStoreTestCase):
"""
Sets up a course with a student, a moderator and their cohorts.
"""
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CohortedContentTestCase, self).setUp()
self.course = CourseFactory.create(
discussion_topics={
"cohorted topic": {"id": "cohorted_topic"},
"non-cohorted topic": {"id": "non_cohorted_topic"},
},
cohort_config={
"cohorted": True,
"cohorted_discussions": ["cohorted_topic"]
}
)
self.student_cohort = CourseUserGroup.objects.create(
name="student_cohort",
course_id=self.course.id,
group_type=CourseUserGroup.COHORT
)
self.moderator_cohort = CourseUserGroup.objects.create(
name="moderator_cohort",
course_id=self.course.id,
group_type=CourseUserGroup.COHORT
)
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.moderator = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
self.student_cohort.users.add(self.student)
self.moderator_cohort.users.add(self.moderator)
| peterm-itr/edx-platform | lms/djangoapps/django_comment_client/tests/utils.py | Python | agpl-3.0 | 2,119 |
import idaapi
import idautils
def get_all_strings():
"""
    Iterate over all strings IDA found in the binary and write them,
    one per line, to the output file.
"""
list_of_strings = idautils.Strings()
for string in list_of_strings:
if not str(string).endswith("\n"):
f.write(str(string)+'\n')
else:
f.write(str(string))
#Wait for analysis to complete
idaapi.autoWait()
#Write results to output file
output_dir="C:\data\output\\"
output_filename=str(idc.GetInputFile())+'.txt'
with open(output_dir+output_filename,'a') as f:
get_all_strings()
#Exit program
idc.Exit(0)
| arvinddoraiswamy/slid | archived/get_all_strings.py | Python | mit | 603 |
# -*- coding: UTF-8 -*-
from django import forms
from .models import *
class LoginForm(forms.Form):
User = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'User', 'class' : 'form-control'}))
    Password = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Password', 'class' : 'form-control', 'type' : 'password'}))
class RegForm(forms.ModelForm):
class Meta:
model = Usuario
widgets = {
'User' : forms.TextInput(attrs={'placeholder': 'NickName', 'name': 'User', 'class' : 'form-control'}),
'Nombre' : forms.TextInput(attrs={'placeholder': 'Nombre', 'class' : 'form-control'}),
'Apellidos' : forms.TextInput(attrs={'placeholder': 'Apellidos', 'class' : 'form-control'}),
'Email' : forms.TextInput(attrs={'placeholder': 'Email', 'class' : 'form-control'}),
'Password': forms.PasswordInput(attrs={'placeholder': 'Password', 'class' : 'form-control'}),
'Visa' : forms.TextInput(attrs={'placeholder': 'VISA', 'class' : 'form-control'}),
'Fecha' : forms.SelectDateWidget(years=range(1950, 2016), attrs={'class' : 'form-control'}),
'Direccion' : forms.TextInput(attrs={'placeholder': 'Dirección', 'class' : 'form-control'}),
}
fields = ('User', 'Nombre', 'Apellidos', 'Email', 'Password', 'Visa', 'Fecha', 'Direccion', 'I_perfil', 'Aceptacion')
class VideoForm(forms.ModelForm):
class Meta:
model = Video
widgets = {
'titulo' : forms.TextInput(attrs={'placeholder': 'Título', 'class' : 'form-control'}),
'descripcion' : forms.Textarea(attrs={'placeholder': 'Introduce alguna descripción', 'class' : 'form-control'}),
}
fields = ('video', 'titulo', 'descripcion') | araluce/NextMedia | NextMedia/forms.py | Python | gpl-3.0 | 1,632 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot capacity chart."""
import collections
import json
import os
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def chart():
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
# Capacity vs num objects for 100 unique features
#
# Generated with:
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 2200 2400 2600 --numUniqueFeatures 100 --locationModuleWidth 5 --resultName results/capacity_100_feats_25_cpm.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 2200 2400 2600 --numUniqueFeatures 100 --locationModuleWidth 10 --resultName results/capacity_100_feats_100_cpm.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 2200 2400 2600 --numUniqueFeatures 100 --locationModuleWidth 20 --resultName results/capacity_100_feats_400_cpm.json
#plt.style.use("ggplot")
markers = ("s", "o", "^")
for cpm, marker in zip((100, 196, 400), markers):
with open("results/capacity_1_500_feats_{}_cpm.json".format(str(cpm)), "r") as f:
experiments = json.load(f)
expResults = []
for exp in experiments:
numObjects = exp[0]["numObjects"]
#if numObjects > 2601:
# continue
failed = exp[1]["convergence"].get("null", 0)
expResults.append((
numObjects,
1.0 - (float(failed) / float(numObjects))
))
x = []
y = []
for i, j in sorted(expResults):
x.append(i)
y.append(j)
plt.plot(
x, y, "{}-".format(marker), label="{} Cells Per Module".format(str(cpm)),
)
plt.xlabel("Number of Objects")
plt.ylabel("Accuracy")
plt.legend(loc="center left")
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "capacity100.pdf"))
plt.clf()
# Capacity vs num objects with different # of unique features
#
# Generated with:
# python convergence_simulation.py --numObjects 500 1000 1500 2000 2500 3000 3500 4000 4500 5000 --numUniqueFeatures 50 --locationModuleWidth 20 --thresholds 18 --resultName results/capacity_50_feats_400_cpm.json
# python convergence_simulation.py --numObjects 500 1000 1500 2000 2500 3000 3500 4000 4500 5000 --numUniqueFeatures 100 --locationModuleWidth 20 --thresholds 18 --resultName results/capacity_100_feats_400_cpm.json
# python convergence_simulation.py --numObjects 500 1000 1500 2000 2500 3000 3500 4000 4500 5000 --numUniqueFeatures 500 --locationModuleWidth 20 --thresholds 18 --resultName results/capacity_500_feats_400_cpm.json
#plt.style.use("ggplot")
for feats, marker in zip((100, 200, 500), markers):
with open("results/capacity_{}_feats_400_cpm.json".format(str(feats)), "r") as f:
experiments = json.load(f)
expResults = []
for exp in experiments:
numObjects = exp[0]["numObjects"]
failed = exp[1]["convergence"].get("null", 0)
expResults.append((
numObjects,
1.0 - (float(failed) / float(numObjects))
))
x = []
y = []
for i, j in sorted(expResults):
x.append(i)
y.append(j)
plt.plot(
x, y, "{}-".format(marker), label="{} Unique Features".format(str(feats)),
)
plt.xlabel("Number of Objects")
plt.ylabel("Accuracy")
plt.legend(loc="center left")
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "capacity_with_features.pdf"))
plt.clf()
## Capacity vs num objects for 5000 unique features
#plt.style.use("ggplot")
#plt.plot(
# X5k, Y5k25, "-", label="25 cells per module",
#)
#plt.plot(
# X5k, Y5k100, "-", label="100 cells per module",
#)
#plt.plot(
# X5k, Y5k400, "-", label="400 cells per module",
#)
#plt.xlabel("Number of Objects")
#plt.ylabel("Accuracy")
#plt.legend(loc="lower left")
#plt.ylim(-0.01, 1.01)
#plt.savefig(os.path.join(CHART_DIR, "capacity5000.pdf"))
#plt.clf()
if __name__ == "__main__":
chart()
| neuroidss/nupic.research | projects/union_path_integration/plot_capacity.py | Python | agpl-3.0 | 5,008 |
# Copyright (C) 2013,2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013,2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.lib.packet import bgp
from ryu.lib.packet import afi
from ryu.lib.packet import safi
class Test_bgp(unittest.TestCase):
""" Test case for ryu.lib.packet.bgp
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_open1(self):
msg = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.1')
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 29)
eq_(rest, '')
def test_open2(self):
opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200,
cap_value='hoge'),
bgp.BGPOptParamCapabilityGracefulRestart(flags=0,
time=120,
tuples=[]),
bgp.BGPOptParamCapabilityRouteRefresh(),
bgp.BGPOptParamCapabilityCiscoRouteRefresh(),
bgp.BGPOptParamCapabilityMultiprotocol(
afi=afi.IP, safi=safi.MPLS_VPN),
bgp.BGPOptParamCapabilityCarryingLabelInfo(),
bgp.BGPOptParamCapabilityFourOctetAsNumber(
as_number=1234567),
bgp.BGPOptParamUnknown(type_=99, value='fuga')]
msg = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.2',
opt_param=opt_param)
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
ok_(len(msg) > 29)
eq_(rest, '')
def test_update1(self):
msg = bgp.BGPUpdate()
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 23)
eq_(rest, '')
def test_update2(self):
withdrawn_routes = [bgp.BGPWithdrawnRoute(length=0,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=1,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=3,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=7,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=32,
addr='192.0.2.13')]
mp_nlri = [
bgp.LabelledVPNIPAddrPrefix(24, '192.0.9.0',
route_dist='100:100',
labels=[1, 2, 3]),
bgp.LabelledVPNIPAddrPrefix(26, '192.0.10.192',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
mp_nlri2 = [
bgp.LabelledIPAddrPrefix(24, '192.168.0.0', labels=[1, 2, 3])
]
communities = [
bgp.BGP_COMMUNITY_NO_EXPORT,
bgp.BGP_COMMUNITY_NO_ADVERTISE,
]
ecommunities = [
bgp.BGPTwoOctetAsSpecificExtendedCommunity(
subtype=1, as_number=65500, local_administrator=3908876543),
bgp.BGPFourOctetAsSpecificExtendedCommunity(
subtype=2, as_number=10000000, local_administrator=59876),
bgp.BGPIPv4AddressSpecificExtendedCommunity(
subtype=3, ipv4_address='192.0.2.1',
local_administrator=65432),
bgp.BGPOpaqueExtendedCommunity(opaque='abcdefg'),
bgp.BGPUnknownExtendedCommunity(type_=99, value='abcdefg'),
]
path_attributes = [
bgp.BGPPathAttributeOrigin(value=1),
bgp.BGPPathAttributeAsPath(value=[[1000], set([1001, 1002]),
[1003, 1004]]),
bgp.BGPPathAttributeNextHop(value='192.0.2.199'),
bgp.BGPPathAttributeMultiExitDisc(value=2000000000),
bgp.BGPPathAttributeLocalPref(value=1000000000),
bgp.BGPPathAttributeAtomicAggregate(),
bgp.BGPPathAttributeAggregator(as_number=40000,
addr='192.0.2.99'),
bgp.BGPPathAttributeCommunities(communities=communities),
bgp.BGPPathAttributeOriginatorId(value='10.1.1.1'),
bgp.BGPPathAttributeClusterList(value=['1.1.1.1', '2.2.2.2']),
bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities),
bgp.BGPPathAttributeAs4Path(value=[[1000000], set([1000001, 1002]),
[1003, 1000004]]),
bgp.BGPPathAttributeAs4Aggregator(as_number=100040000,
addr='192.0.2.99'),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
next_hop='1.1.1.1',
nlri=mp_nlri),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_LABEL,
next_hop='1.1.1.1',
nlri=mp_nlri2),
bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
withdrawn_routes=mp_nlri),
bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * 'bar')
]
nlri = [
bgp.BGPNLRI(length=24, addr='203.0.113.1'),
bgp.BGPNLRI(length=16, addr='203.0.113.0')
]
msg = bgp.BGPUpdate(withdrawn_routes=withdrawn_routes,
path_attributes=path_attributes,
nlri=nlri)
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
ok_(len(msg) > 23)
eq_(rest, '')
def test_keepalive(self):
msg = bgp.BGPKeepAlive()
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 19)
eq_(rest, '')
def test_notification(self):
data = "hoge"
msg = bgp.BGPNotification(error_code=1, error_subcode=2, data=data)
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 21 + len(data))
eq_(rest, '')
def test_route_refresh(self):
msg = bgp.BGPRouteRefresh(afi=afi.IP, safi=safi.MPLS_VPN)
binmsg = msg.serialize()
msg2, rest = bgp.BGPMessage.parser(binmsg)
eq_(str(msg), str(msg2))
eq_(len(msg), 23)
eq_(rest, '')
def test_stream_parser(self):
msgs = [
bgp.BGPNotification(error_code=1, error_subcode=2, data="foo"),
bgp.BGPNotification(error_code=3, error_subcode=4, data="bar"),
bgp.BGPNotification(error_code=5, error_subcode=6, data="baz"),
]
binmsgs = ''.join([bytes(msg.serialize()) for msg in msgs])
sp = bgp.StreamParser()
results = []
for b in binmsgs:
for m in sp.parse(b):
results.append(m)
eq_(str(results), str(msgs))
def test_parser(self):
files = [
'bgp4-open',
# commented out because
# 1. we don't support 32 bit AS numbers in AS_PATH
# 2. quagga always uses EXTENDED for AS_PATH
# 'bgp4-update',
'bgp4-keepalive',
]
dir = '../packet_data/bgp4/'
for f in files:
print('testing ' + f)
binmsg = open(dir + f).read()
msg, rest = bgp.BGPMessage.parser(binmsg)
binmsg2 = msg.serialize()
eq_(binmsg, binmsg2)
eq_(rest, '')
def test_json1(self):
opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200,
cap_value='hoge'),
bgp.BGPOptParamCapabilityRouteRefresh(),
bgp.BGPOptParamCapabilityMultiprotocol(
afi=afi.IP, safi=safi.MPLS_VPN),
bgp.BGPOptParamCapabilityFourOctetAsNumber(
as_number=1234567),
bgp.BGPOptParamUnknown(type_=99, value='fuga')]
msg1 = bgp.BGPOpen(my_as=30000, bgp_identifier='192.0.2.2',
opt_param=opt_param)
jsondict = msg1.to_jsondict()
msg2 = bgp.BGPOpen.from_jsondict(jsondict['BGPOpen'])
eq_(str(msg1), str(msg2))
def test_json2(self):
withdrawn_routes = [bgp.BGPWithdrawnRoute(length=0,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=1,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=3,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=7,
addr='192.0.2.13'),
bgp.BGPWithdrawnRoute(length=32,
addr='192.0.2.13')]
mp_nlri = [
bgp.LabelledVPNIPAddrPrefix(24, '192.0.9.0',
route_dist='100:100',
labels=[1, 2, 3]),
bgp.LabelledVPNIPAddrPrefix(26, '192.0.10.192',
route_dist='10.0.0.1:10000',
labels=[5, 6, 7, 8]),
]
communities = [
bgp.BGP_COMMUNITY_NO_EXPORT,
bgp.BGP_COMMUNITY_NO_ADVERTISE,
]
ecommunities = [
bgp.BGPTwoOctetAsSpecificExtendedCommunity(
subtype=1, as_number=65500, local_administrator=3908876543),
bgp.BGPFourOctetAsSpecificExtendedCommunity(
subtype=2, as_number=10000000, local_administrator=59876),
bgp.BGPIPv4AddressSpecificExtendedCommunity(
subtype=3, ipv4_address='192.0.2.1',
local_administrator=65432),
bgp.BGPOpaqueExtendedCommunity(opaque='abcdefg'),
bgp.BGPUnknownExtendedCommunity(type_=99, value='abcdefg'),
]
path_attributes = [
bgp.BGPPathAttributeOrigin(value=1),
bgp.BGPPathAttributeAsPath(value=[[1000], set([1001, 1002]),
[1003, 1004]]),
bgp.BGPPathAttributeNextHop(value='192.0.2.199'),
bgp.BGPPathAttributeMultiExitDisc(value=2000000000),
bgp.BGPPathAttributeLocalPref(value=1000000000),
bgp.BGPPathAttributeAtomicAggregate(),
bgp.BGPPathAttributeAggregator(as_number=40000,
addr='192.0.2.99'),
bgp.BGPPathAttributeCommunities(communities=communities),
bgp.BGPPathAttributeExtendedCommunities(communities=ecommunities),
bgp.BGPPathAttributeAs4Path(value=[[1000000], set([1000001, 1002]),
[1003, 1000004]]),
bgp.BGPPathAttributeAs4Aggregator(as_number=100040000,
addr='192.0.2.99'),
bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
next_hop='1.1.1.1',
nlri=mp_nlri),
bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN,
withdrawn_routes=mp_nlri),
bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * 'bar')
]
nlri = [
bgp.BGPNLRI(length=24, addr='203.0.113.1'),
bgp.BGPNLRI(length=16, addr='203.0.113.0')
]
msg1 = bgp.BGPUpdate(withdrawn_routes=withdrawn_routes,
path_attributes=path_attributes,
nlri=nlri)
jsondict = msg1.to_jsondict()
msg2 = bgp.BGPUpdate.from_jsondict(jsondict['BGPUpdate'])
eq_(str(msg1), str(msg2))
| gopchandani/ryu | ryu/tests/unit/packet/test_bgp.py | Python | apache-2.0 | 13,171 |
import unittest
import random, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_hosts, h2o_import as h2i
# RF train parameters
paramsTrainRF = {
'ntree' : 100,
'depth' : 300,
'bin_limit' : 20000,
'ignore' : None,
'stat_type' : 'ENTROPY',
'out_of_bag_error_estimate': 1,
'exclusive_split_limit': 0,
'timeoutSecs': 14800,
}
# RF test parameters
paramsTestRF = {
# scoring requires the response_variable. it defaults to last, so normally
# we don't need to specify. But put this here and (above if used)
# in case a dataset doesn't use last col
'response_variable': None,
'out_of_bag_error_estimate': 0,
'timeoutSecs': 14800,
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_iris(self):
# Train RF
trainParseResult = h2i.import_parse(bucket='smalldata', path='iris/iris2.csv', hex_key='train_iris2.hex', schema='put')
kwargs = paramsTrainRF.copy()
trainResult = h2o_rf.trainRF(trainParseResult, **kwargs)
scoreParseResult = h2i.import_parse(bucket='smalldata', path='iris/iris2.csv', hex_key='score_iris2.hex', schema='put')
kwargs = paramsTestRF.copy()
scoreResult = h2o_rf.scoreRF(scoreParseResult, trainResult, **kwargs)
print "\nTrain\n=========={0}".format(h2o_rf.pp_rf_result(trainResult))
print "\nScoring\n========={0}".format(h2o_rf.pp_rf_result(scoreResult))
if __name__ == '__main__':
h2o.unit_main()
| janezhango/BigDataMachineLearning | py/testdir_ec2/test_rf_iris.py | Python | apache-2.0 | 2,017 |
import logging
from django.utils.log import DEFAULT_LOGGING
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DEBUG", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"SECRET_KEY",
default="fOqtAorZrVqWYbuMPOcZnTzw2D5bKeHGpXUwCaNBnvFUmO1njCQZGz05x1BhDG0E",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
import socket
import os
if os.environ.get("USE_DOCKER") == "yes":
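    # Whitelist the Docker host for the debug toolbar: replacing the last
    # octet of each container IP with ".1" approximates the bridge gateway
    # address through which requests from the host machine arrive.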
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| vitorfs/bootcamp | config/settings/local.py | Python | mit | 2,835 |
# coding=utf-8
"""Provenance Keys."""
from safe.definitions.layer_purposes import (
layer_purpose_exposure_summary,
layer_purpose_aggregate_hazard_impacted,
layer_purpose_aggregation_summary,
layer_purpose_analysis_impacted,
layer_purpose_exposure_summary_table,
layer_purpose_profiling
)
from safe.utilities.i18n import tr
provenance_action_checklist = {
'key': 'provenance_action_checklist',
'name': tr('Action Checklist'),
'provenance_key': 'action_checklist'
}
provenance_aggregation_keywords = {
'key': 'provenance_aggregation_keywords',
'name': tr('Aggregation Keywords'),
'provenance_key': 'aggregation_keywords'
}
provenance_aggregation_layer = {
'key': 'provenance_aggregation_layer',
'name': tr('Aggregation Layer'),
'provenance_key': 'aggregation_layer'
}
provenance_aggregation_layer_id = {
'key': 'provenance_aggregation_layer_id',
'name': tr('Aggregation Layer ID'),
'provenance_key': 'aggregation_layer_id'
}
provenance_analysis_extent = {
'key': 'provenance_analysis_extent',
'name': tr('Analysis Extent'),
'provenance_key': 'analysis_extent'
}
provenance_analysis_question = {
'key': 'provenance_analysis_question',
'name': tr('Analysis Question'),
'provenance_key': 'analysis_question'
}
provenance_data_store_uri = {
'key': 'provenance_data_store_uri',
'name': tr('Data Store URI'),
'provenance_key': 'data_store_uri'
}
provenance_duration = {
'key': 'provenance_duration',
'name': tr('Duration'),
'provenance_key': 'duration'
}
provenance_end_datetime = {
'key': 'provenance_end_datetime',
'name': tr('End Datetime'),
'provenance_key': 'end_datetime'
}
provenance_exposure_keywords = {
'key': 'provenance_exposure_keywords',
'name': tr('Exposure Keywords'),
'provenance_key': 'exposure_keywords'
}
provenance_exposure_layer = {
'key': 'provenance_exposure_layer',
'name': tr('Exposure Layer'),
'provenance_key': 'exposure_layer'
}
provenance_exposure_layer_id = {
'key': 'provenance_exposure_layer_id',
'name': tr('Exposure Layer Id'),
'provenance_key': 'exposure_layer_id'
}
provenance_gdal_version = {
'key': 'provenance_gdal_version',
'name': tr('GDAL Version'),
'provenance_key': 'gdal_version'
}
provenance_hazard_keywords = {
'key': 'provenance_hazard_keywords',
'name': tr('Hazard Keywords'),
'provenance_key': 'hazard_keywords'
}
provenance_hazard_layer = {
'key': 'provenance_hazard_layer',
'name': tr('Hazard Layer'),
'provenance_key': 'hazard_layer'
}
provenance_hazard_layer_id = {
'key': 'provenance_hazard_layer_id',
'name': tr('Hazard Layer ID'),
'provenance_key': 'hazard_layer_id'
}
provenance_host_name = {
'key': 'provenance_host_name',
'name': tr('Host Name'),
'provenance_key': 'host_name'
}
provenance_impact_function_name = {
'key': 'provenance_impact_function_name',
'name': tr('Impact Function Name'),
'provenance_key': 'impact_function_name'
}
provenance_impact_function_title = {
'key': 'provenance_impact_function_title',
'name': tr('Impact Function Title'),
'provenance_key': 'impact_function_title'
}
provenance_inasafe_version = {
'key': 'provenance_inasafe_version',
'name': tr('InaSAFE Version'),
'provenance_key': 'inasafe_version'
}
provenance_map_legend_title = {
'key': 'provenance_map_legend_title',
'name': tr('Map Legend Title'),
'provenance_key': 'map_legend_title'
}
provenance_map_title = {
'key': 'provenance_map_title',
'name': tr('Map Title'),
'provenance_key': 'map_title'
}
provenance_notes = {
'key': 'provenance_notes',
'name': tr('Notes'),
'provenance_key': 'notes'
}
provenance_os = {
'key': 'provenance_os',
'name': tr('OS'),
'provenance_key': 'os'
}
provenance_pyqt_version = {
'key': 'provenance_pyqt_version',
'name': tr('PyQT Version'),
'provenance_key': 'pyqt_version'
}
provenance_qgis_version = {
'key': 'provenance_qgis_version',
'name': tr('QGIS Version'),
'provenance_key': 'qgis_version'
}
provenance_qt_version = {
'key': 'provenance_qt_version',
'name': tr('QT Version'),
'provenance_key': 'qt_version'
}
provenance_requested_extent = {
'key': 'provenance_requested_extent',
'name': tr('Requested Extent'),
'provenance_key': 'requested_extent'
}
provenance_start_datetime = {
'key': 'provenance_start_datetime',
'name': tr('Start Datetime'),
'provenance_key': 'start_datetime'
}
provenance_user = {
'key': 'provenance_user',
'name': tr('User'),
'provenance_key': 'user'
}
# Output layer path
provenance_layer_exposure_summary = {
'key': 'provenance_layer_exposure_summary',
'name': layer_purpose_exposure_summary['name'],
'provenance_key': layer_purpose_exposure_summary['key']
}
provenance_layer_aggregate_hazard_impacted = {
'key': 'provenance_layer_aggregate_hazard_impacted',
'name': layer_purpose_aggregate_hazard_impacted['name'],
'provenance_key': layer_purpose_aggregate_hazard_impacted['key']
}
provenance_layer_aggregation_summary = {
'key': 'provenance_layer_aggregation_summary',
'name': layer_purpose_aggregation_summary['name'],
'provenance_key': layer_purpose_aggregation_summary['key']
}
provenance_layer_analysis_impacted = {
'key': 'provenance_layer_analysis_impacted',
'name': layer_purpose_analysis_impacted['name'],
'provenance_key': layer_purpose_analysis_impacted['key']
}
provenance_layer_exposure_summary_table = {
'key': 'provenance_layer_exposure_summary_table',
'name': layer_purpose_exposure_summary_table['name'],
'provenance_key': layer_purpose_exposure_summary_table['key']
}
provenance_layer_profiling = {
'key': 'provenance_layer_profiling',
'name': layer_purpose_profiling['name'],
'provenance_key': layer_purpose_profiling['key']
}
# Layers ID
provenance_layer_exposure_summary_id = {
'key': 'provenance_layer_exposure_summary_id',
'name': layer_purpose_exposure_summary['name'] + ' ID',
'provenance_key': layer_purpose_exposure_summary['key'] + '_id'
}
provenance_layer_aggregate_hazard_impacted_id = {
'key': 'provenance_layer_aggregate_hazard_impacted_id',
'name': layer_purpose_aggregate_hazard_impacted['name'] + ' ID',
'provenance_key': layer_purpose_aggregate_hazard_impacted['key'] + '_id'
}
provenance_layer_aggregation_summary_id = {
'key': 'provenance_layer_aggregation_summary_id',
'name': layer_purpose_aggregation_summary['name'] + ' ID',
'provenance_key': layer_purpose_aggregation_summary['key'] + '_id'
}
provenance_layer_analysis_impacted_id = {
'key': 'provenance_layer_analysis_impacted_id',
'name': layer_purpose_analysis_impacted['name'] + ' ID',
'provenance_key': layer_purpose_analysis_impacted['key'] + '_id'
}
provenance_layer_exposure_summary_table_id = {
'key': 'provenance_layer_exposure_summary_table_id',
'name': layer_purpose_exposure_summary_table['name'] + ' ID',
'provenance_key': layer_purpose_exposure_summary_table['key'] + '_id'
}
provenance_layer_profiling_id = {
'key': 'provenance_layer_profiling_id',
'name': layer_purpose_profiling['name'] + ' ID',
'provenance_key': layer_purpose_profiling['key'] + '_id'
}
provenance_list = [
provenance_action_checklist,
provenance_aggregation_keywords,
provenance_aggregation_layer,
provenance_aggregation_layer_id,
provenance_analysis_extent,
provenance_analysis_question,
provenance_data_store_uri,
provenance_duration,
provenance_end_datetime,
provenance_exposure_keywords,
provenance_exposure_layer,
provenance_exposure_layer_id,
provenance_gdal_version,
provenance_hazard_keywords,
provenance_hazard_layer,
provenance_hazard_layer_id,
provenance_host_name,
provenance_impact_function_name,
provenance_impact_function_title,
provenance_inasafe_version,
provenance_map_legend_title,
provenance_map_title,
provenance_notes,
provenance_os,
provenance_pyqt_version,
provenance_qgis_version,
provenance_qt_version,
provenance_requested_extent,
provenance_start_datetime,
provenance_user,
# Output layer path
provenance_layer_exposure_summary,
provenance_layer_aggregate_hazard_impacted,
provenance_layer_aggregation_summary,
provenance_layer_analysis_impacted,
provenance_layer_exposure_summary_table,
provenance_layer_profiling,
provenance_layer_exposure_summary_id,
provenance_layer_aggregate_hazard_impacted_id,
provenance_layer_aggregation_summary_id,
provenance_layer_analysis_impacted_id,
provenance_layer_exposure_summary_table_id,
provenance_layer_profiling_id,
]
| akbargumbira/inasafe | safe/definitions/provenance.py | Python | gpl-3.0 | 8,780 |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install inference gym."""
import os
import sys
from setuptools import find_packages
from setuptools import setup
# To enable importing version.py directly, we add its path to sys.path.
version_path = os.path.join(
os.path.dirname(__file__), 'inference_gym')
sys.path.append(version_path)
from version import __version__ # pylint: disable=g-import-not-at-top
with open('README.md', 'r') as fh:
oryx_long_description = fh.read()
setup(
name='inference_gym',
python_requires='>=3.6',
version=__version__,
description='The Inference Gym is the place to exercise inference methods to help make them faster, leaner and more robust.',
long_description=oryx_long_description,
long_description_content_type='text/markdown',
author='Google LLC',
author_email='[email protected]',
url='https://github.com/tensorflow/probability/tree/main/spinoffs/inference_gym',
license='Apache 2.0',
packages=find_packages('.'),
# Add in any packaged data.
exclude_package_data={'': ['BUILD']},
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='tensorflow jax probability statistics bayesian machine learning',
)
| tensorflow/probability | spinoffs/inference_gym/setup.py | Python | apache-2.0 | 2,660 |
# Scrapy settings for dirbot project
SPIDER_MODULES = ['dirbot.spiders']
NEWSPIDER_MODULE = 'dirbot.spiders'
DEFAULT_ITEM_CLASS = 'dirbot.items.Website'
ITEM_PIPELINES = ['dirbot.pipelines.FilterWordsPipeline']
| 535521469/crawler_sth | dirbot/settings.py | Python | bsd-3-clause | 213 |
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Guess the Word: Count Matching Letters
#Problem level: 7 kyu
def count_correct_characters(correct, guess):
    if len(correct) != len(guess): raise ValueError("correct and guess must be the same length")
count = 0
for i in range(len(correct)):
if correct[i] == guess[i]: count+=1
return count
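# A minimal sanity check of the solution above; the sample words are my own.
if __name__ == '__main__':
    assert count_correct_characters("dog", "dot") == 2
    assert count_correct_characters("aaa", "bbb") == 0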
| Kunalpod/codewars | guess_the_word_count_matching_letters.py | Python | mit | 317 |
# Authors:
# Petr Viktorin <[email protected]>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/ping.py` module, and XML-RPC in general.
"""
from ipalib import api, errors, _
from tests.util import assert_equal, Fuzzy
from xmlrpc_test import Declarative
class test_ping(Declarative):
tests = [
dict(
desc='Ping the server',
command=('ping', [], {}),
expected=dict(
summary=Fuzzy('IPA server version .*. API version .*')),
),
dict(
desc='Try to ping with an argument',
command=('ping', ['bad_arg'], {}),
expected=errors.ZeroArgumentError(name='ping'),
),
dict(
desc='Try to ping with an option',
command=('ping', [], dict(bad_arg=True)),
expected=errors.OptionError(_('Unknown option: %(option)s'),
option='bad_arg'),
),
]
| hatchetation/freeipa | tests/test_xmlrpc/test_ping_plugin.py | Python | gpl-3.0 | 1,632 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registro', '0028_auto_20170126_2106'),
]
operations = [
migrations.CreateModel(
name='CertificacionItem',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('created_at', models.DateTimeField(verbose_name='Fecha de creación', auto_now_add=True)),
('modified_at', models.DateTimeField(verbose_name='Fecha de modificación', auto_now=True)),
('descripcion', models.CharField(verbose_name='descripción', max_length=255)),
('monto', models.DecimalField(verbose_name='Monto ($)', max_digits=18, decimal_places=2)),
('adicional', models.BooleanField(verbose_name='adicional', default=False)),
('certificacion', models.ForeignKey(verbose_name='certificación', related_name='items', to='registro.Certificacion')),
],
options={
'verbose_name': 'ítem certificación',
'verbose_name_plural': 'ítemes de certificaciones',
},
),
]
| infoINGenieria/Zweb | z_web/registro/migrations/0029_certificacionitem.py | Python | gpl-2.0 | 1,293 |
import json
# Django Libraries
from django import template
from django.utils import six
# Register template tag library
register = template.Library()
@register.simple_tag
def py2js(obj):
"""
Convert a Python object to a JavaScript value to render on page.
"""
# Undefined
    if obj is None:
return 'null'
# Boolean values
if isinstance(obj, bool):
return 'true' if obj else 'false'
# Integer values
if isinstance(obj, six.integer_types):
return obj
# String values
if isinstance(obj, six.string_types):
return '"{0}"'.format(obj)
# List / dictionary values
if isinstance(obj, (list, dict)):
        return json.dumps(obj)
 | djtaylor/lense-portal | usr/lib/python2.7/dist-packages/lense/portal/ui/util/templatetags/common.py | Python | gpl-3.0 | 730 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
Provide auth0 / OpenID Connect protection for API endpoints.
accept_token will take an oauth2 access_token provided by auth0 and
use the userinfo endpoint to validate it. This is because the token
info endpoint used by the Flask-OIDC accept_token wrapper has some
issues with validating tokens for certain application types.
'''
from __future__ import absolute_import
import cli_common.log
import flask
import flask_oidc
import functools
import json
import requests
logger = cli_common.log.get_logger(__name__)
auth0 = flask_oidc.OpenIDConnect()
def mozilla_accept_token(render_errors=True):
'''
Use this to decorate view functions that should accept OAuth2 tokens,
this will most likely apply to API functions.
Tokens are accepted as part of
* the query URL (access_token value)
* a POST form value (access_token)
* the header Authorization: Bearer <token value>
:param render_errors: Whether or not to eagerly render error objects
as JSON API responses. Set to False to pass the error object back
unmodified for later rendering.
:type render_errors: bool
Side effects: flask.g gets the 'userinfo' attribute containing the data
from the response
.. versionadded:: 1.0
'''
def wrapper(view_func):
@functools.wraps(view_func)
def decorated(*args, **kwargs):
token = None
if flask.request.headers.get('Authorization', '').startswith('Bearer'):
token = flask.request.headers['Authorization'].split(maxsplit=1)[
1].strip()
if 'access_token' in flask.request.form:
token = flask.request.form['access_token']
elif 'access_token' in flask.request.args:
token = flask.request.args['access_token']
url = auth0.client_secrets.get(
'userinfo_uri', 'https://auth.mozilla.auth0.com/userinfo')
payload = {'access_token': token}
response = requests.get(url, params=payload)
# Because auth0 returns http 200 even if the token is invalid.
if response.content == b'Unauthorized':
response_body = {'error': 'invalid_token',
'error_description': str(response.content, 'utf-8')}
if render_errors:
response_body = json.dumps(response_body)
return response_body, 401, {'WWW-Authenticate': 'Bearer'}
# store response.content for later
flask.g.userinfo = json.loads(str(response.content, 'utf-8'))
# g.oidc_id_token = token # requires a specific format
flask.g.access_token = token
return view_func(*args, **kwargs)
return decorated
return wrapper
def init_app(app):
if app.config.get('SECRET_KEY') is None:
raise Exception('When using `auth0` extention you need to specify SECRET_KEY.')
auth0.init_app(app)
return auth0
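# Illustrative usage sketch (not part of the original module): how a Flask view
# might opt in to token checking with the decorator above. The endpoint name
# and app wiring are assumptions, and init_app() still needs the usual
# flask_oidc client-secrets configuration before any of this can run.
#
#     app = flask.Flask(__name__)
#     app.config['SECRET_KEY'] = 'change-me'
#     init_app(app)
#
#     @app.route('/api/userinfo')
#     @mozilla_accept_token()
#     def userinfo():
#         return flask.jsonify(flask.g.userinfo)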
| garbas/mozilla-releng-services | lib/backend_common/backend_common/auth0.py | Python | mpl-2.0 | 3,216 |
from smart_pointer_templatevariables import *
d = DiffImContainerPtr_D(create(1234, 5678))
if (d.id != 1234):
raise RuntimeError
# if (d.xyz != 5678):
# raise RuntimeError
d.id = 4321
#d.xyz = 8765
if (d.id != 4321):
raise RuntimeError
# if (d.xyz != 8765):
# raise RuntimeError
| DGA-MI-SSI/YaCo | deps/swig-3.0.7/Examples/test-suite/python/smart_pointer_templatevariables_runme.py | Python | gpl-3.0 | 293 |
# Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Named entity chunker
"""
from __future__ import print_function
import os, re, pickle
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
try:
from nltk.classify import MaxentClassifier
except ImportError:
pass
from nltk.tree import Tree
from nltk.tokenize import word_tokenize
from nltk.data import find
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
class NEChunkParserTagger(ClassifierBasedTagger):
"""
The IOB tagger used by the chunk parser.
"""
def __init__(self, train):
ClassifierBasedTagger.__init__(
self, train=train,
classifier_builder=self._classifier_builder)
def _classifier_builder(self, train):
return MaxentClassifier.train(train, algorithm='megam',
gaussian_prior_sigma=1,
trace=2)
def _english_wordlist(self):
try:
wl = self._en_wordlist
except AttributeError:
from nltk.corpus import words
self._en_wordlist = set(words.words('en-basic'))
wl = self._en_wordlist
return wl
def _feature_detector(self, tokens, index, history):
word = tokens[index][0]
pos = simplify_pos(tokens[index][1])
if index == 0:
prevword = prevprevword = None
prevpos = prevprevpos = None
prevshape = prevtag = prevprevtag = None
elif index == 1:
prevword = tokens[index-1][0].lower()
prevprevword = None
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = None
prevtag = history[index-1][0]
prevshape = prevprevtag = None
else:
prevword = tokens[index-1][0].lower()
prevprevword = tokens[index-2][0].lower()
prevpos = simplify_pos(tokens[index-1][1])
prevprevpos = simplify_pos(tokens[index-2][1])
prevtag = history[index-1]
prevprevtag = history[index-2]
prevshape = shape(prevword)
if index == len(tokens)-1:
nextword = nextnextword = None
nextpos = nextnextpos = None
elif index == len(tokens)-2:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = None
nextnextpos = None
else:
nextword = tokens[index+1][0].lower()
nextpos = tokens[index+1][1].lower()
nextnextword = tokens[index+2][0].lower()
nextnextpos = tokens[index+2][1].lower()
# 89.6
features = {
'bias': True,
'shape': shape(word),
'wordlen': len(word),
'prefix3': word[:3].lower(),
'suffix3': word[-3:].lower(),
'pos': pos,
'word': word,
'en-wordlist': (word in self._english_wordlist()),
'prevtag': prevtag,
'prevpos': prevpos,
'nextpos': nextpos,
'prevword': prevword,
'nextword': nextword,
'word+nextpos': '%s+%s' % (word.lower(), nextpos),
'pos+prevtag': '%s+%s' % (pos, prevtag),
'shape+prevtag': '%s+%s' % (prevshape, prevtag),
}
return features
class NEChunkParser(ChunkParserI):
"""
Expected input: list of pos-tagged words
"""
def __init__(self, train):
self._train(train)
def parse(self, tokens):
"""
Each token should be a pos-tagged word
"""
tagged = self._tagger.tag(tokens)
tree = self._tagged_to_parse(tagged)
return tree
def _train(self, corpus):
# Convert to tagged sequence
corpus = [self._parse_to_tagged(s) for s in corpus]
self._tagger = NEChunkParserTagger(train=corpus)
def _tagged_to_parse(self, tagged_tokens):
"""
Convert a list of tagged tokens to a chunk-parse tree.
"""
sent = Tree('S', [])
for (tok,tag) in tagged_tokens:
if tag == 'O':
sent.append(tok)
elif tag.startswith('B-'):
sent.append(Tree(tag[2:], [tok]))
elif tag.startswith('I-'):
if (sent and isinstance(sent[-1], Tree) and
sent[-1].label() == tag[2:]):
sent[-1].append(tok)
else:
sent.append(Tree(tag[2:], [tok]))
return sent
@staticmethod
def _parse_to_tagged(sent):
"""
Convert a chunk-parse tree to a list of tagged tokens.
"""
toks = []
for child in sent:
if isinstance(child, Tree):
if len(child) == 0:
print("Warning -- empty chunk in sentence")
continue
toks.append((child[0], 'B-%s' % child.label()))
for tok in child[1:]:
toks.append((tok, 'I-%s' % child.label()))
else:
toks.append((child, 'O'))
return toks
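# Worked example (added for illustration, sentence invented): the IOB encoding
# used by _tagged_to_parse()/_parse_to_tagged() maps
#     [('John', 'B-NE'), ('Smith', 'I-NE'), ('visited', 'O'), ('Paris', 'O')]
# to and from
#     Tree('S', [Tree('NE', ['John', 'Smith']), 'visited', 'Paris'])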
def shape(word):
if re.match('[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word, re.UNICODE):
return 'number'
elif re.match('\W+$', word, re.UNICODE):
return 'punct'
elif re.match('\w+$', word, re.UNICODE):
if word.istitle():
return 'upcase'
elif word.islower():
return 'downcase'
else:
return 'mixedcase'
else:
return 'other'
def simplify_pos(s):
if s.startswith('V'): return "V"
else: return s.split('-')[0]
def postag_tree(tree):
# Part-of-speech tagging.
words = tree.leaves()
tag_iter = (pos for (word, pos) in pos_tag(words))
newtree = Tree('S', [])
for child in tree:
if isinstance(child, Tree):
newtree.append(Tree(child.label(), []))
for subchild in child:
newtree[-1].append( (subchild, next(tag_iter)) )
else:
newtree.append( (child, next(tag_iter)) )
return newtree
def load_ace_data(roots, fmt='binary', skip_bnews=True):
for root in roots:
for root, dirs, files in os.walk(root):
if root.endswith('bnews') and skip_bnews:
continue
for f in files:
if f.endswith('.sgm'):
for sent in load_ace_file(os.path.join(root, f), fmt):
yield sent
def load_ace_file(textfile, fmt):
print(' - %s' % os.path.split(textfile)[1])
annfile = textfile+'.tmx.rdc.xml'
# Read the xml file, and get a list of entities
entities = []
with open(annfile, 'r') as infile:
xml = ET.parse(infile).getroot()
for entity in xml.findall('document/entity'):
typ = entity.find('entity_type').text
for mention in entity.findall('entity_mention'):
if mention.get('TYPE') != 'NAME': continue # only NEs
s = int(mention.find('head/charseq/start').text)
e = int(mention.find('head/charseq/end').text)+1
entities.append( (s, e, typ) )
# Read the text file, and mark the entities.
with open(textfile, 'r') as infile:
text = infile.read()
# Strip XML tags, since they don't count towards the indices
text = re.sub('<(?!/?TEXT)[^>]+>', '', text)
# Blank out anything before/after <TEXT>
def subfunc(m): return ' '*(m.end()-m.start()-6)
text = re.sub('[\s\S]*<TEXT>', subfunc, text)
text = re.sub('</TEXT>[\s\S]*', '', text)
# Simplify quotes
text = re.sub("``", ' "', text)
text = re.sub("''", '" ', text)
entity_types = set(typ for (s,e,typ) in entities)
# Binary distinction (NE or not NE)
if fmt == 'binary':
i = 0
toks = Tree('S', [])
for (s,e,typ) in sorted(entities):
if s < i: s = i # Overlapping! Deal with this better?
if e <= s: continue
toks.extend(word_tokenize(text[i:s]))
toks.append(Tree('NE', text[s:e].split()))
i = e
toks.extend(word_tokenize(text[i:]))
yield toks
# Multiclass distinction (NE type)
elif fmt == 'multiclass':
i = 0
toks = Tree('S', [])
for (s,e,typ) in sorted(entities):
if s < i: s = i # Overlapping! Deal with this better?
if e <= s: continue
toks.extend(word_tokenize(text[i:s]))
toks.append(Tree(typ, text[s:e].split()))
i = e
toks.extend(word_tokenize(text[i:]))
yield toks
else:
raise ValueError('bad fmt value')
# This probably belongs in a more general-purpose location (as does
# the parse_to_tagged function).
def cmp_chunks(correct, guessed):
correct = NEChunkParser._parse_to_tagged(correct)
guessed = NEChunkParser._parse_to_tagged(guessed)
ellipsis = False
for (w, ct), (w, gt) in zip(correct, guessed):
if ct == gt == 'O':
if not ellipsis:
print(" %-15s %-15s %s" % (ct, gt, w))
print(' %-15s %-15s %s' % ('...', '...', '...'))
ellipsis = True
else:
ellipsis = False
print(" %-15s %-15s %s" % (ct, gt, w))
def build_model(fmt='binary'):
print('Loading training data...')
train_paths = [find('corpora/ace_data/ace.dev'),
find('corpora/ace_data/ace.heldout'),
find('corpora/ace_data/bbn.dev'),
find('corpora/ace_data/muc.dev')]
train_trees = load_ace_data(train_paths, fmt)
train_data = [postag_tree(t) for t in train_trees]
print('Training...')
cp = NEChunkParser(train_data)
del train_data
print('Loading eval data...')
eval_paths = [find('corpora/ace_data/ace.eval')]
eval_trees = load_ace_data(eval_paths, fmt)
eval_data = [postag_tree(t) for t in eval_trees]
print('Evaluating...')
chunkscore = ChunkScore()
for i, correct in enumerate(eval_data):
guess = cp.parse(correct.leaves())
chunkscore.score(correct, guess)
if i < 3: cmp_chunks(correct, guess)
print(chunkscore)
outfilename = '/tmp/ne_chunker_%s.pickle' % fmt
print('Saving chunker to %s...' % outfilename)
with open(outfilename, 'wb') as outfile:
pickle.dump(cp, outfile, -1)
return cp
if __name__ == '__main__':
# Make sure that the pickled object has the right class name:
from nltk.chunk.named_entity import build_model
build_model('binary')
build_model('multiclass')
| adazey/Muzez | libs/nltk/chunk/named_entity.py | Python | gpl-3.0 | 11,160 |
# Gufw 12.10.0 - http://gufw.tuxfamily.org
# Copyright (C) 2008-2012 Marcos Alvarez Costales https://launchpad.net/~costales
#
# Giacomo Picchiarelli https://launchpad.net/~gpicchiarelli
#
# Gufw is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gufw is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gufw; if not, see http://www.gnu.org/licenses for more
# information.
import commands
import time
from time import strftime
import os
import hashlib
class BackupRules:
rules_file = 'user.rules'
rules6_file = 'user6.rules'
def __init__(self):
        self.backup()
def templateFile(self):
tempo = strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
file = "Gufw - Uncomplicated Firewall GUI Backup Rules file\n\n@@ DO NOT EDIT THIS FILE @@\n\nCreated: "+tempo+"\n"
return file
def backup(self):
#READ FILE AND EXTRACT RULES TEXT
input = open(self.getRulesPath(self.rules_file),'r')
content = input.read()
input.close()
sect1 = content.partition("### RULES ###")
sect2 = sect1[2].partition("### END RULES ###")
rules = sect2[0]
#READ FILE AND EXTRACT RULES6 TEXT
input = open(self.getRulesPath(self.rules6_file),'r')
content = input.read()
input.close()
sect1 = content.partition("### RULES ###")
sect2 = sect1[2].partition("### END RULES ###")
rules6 = sect2[0]
#COMPILE FILEBACKUP
comp = ''
comp += str(self.templateFile())
comp += '#RULES-SECTION'+str(rules)+'#END-RULES-SECTION'
comp +='\n'+'#RULES6-SECTION'+str(rules6)+'#END6-RULES-SECTION'
checkin = self.integrityCheck(comp)
comp += '\n --- INTEGRITY CHECK SHA512 --- \n' + checkin
#Save file
namesave = 'gufw_rules_'+strftime("%d-%m-%Y_%H.%M.%S", time.localtime())
fsave = open(self.getBackupPath(namesave),'w+b')
fsave.write(comp)
fsave.close()
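    # Layout of the file written above (sketch, field values illustrative):
    #
    #   Gufw - Uncomplicated Firewall GUI Backup Rules file ... header text
    #   #RULES-SECTION  <ipv4 rules copied from user.rules>  #END-RULES-SECTION
    #   #RULES6-SECTION <ipv6 rules copied from user6.rules> #END6-RULES-SECTION
    #    --- INTEGRITY CHECK SHA512 ---
    #   <sha512 hex digest of everything above>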
    def restore(self, namesave):
        # Restore is only a stub here: open the named backup file for reading.
        return open(self.getBackupPath(namesave), 'rb')
def integrityCheck(self,p_file):
return hashlib.sha512(p_file).hexdigest()
#/lib/ufw/user.rules
def getRulesPath (self,rsfile):
"""Return Rules Path""" # /lib/ufw
path = os.path.join('/home', 'giacomo', rsfile)
if not os.path.exists(path):
print "Error: rules file not found"
return path
#/lib/ufw/user.rules
def getBackupPath (self,rsfile):
path = os.path.join('/home', 'giacomo', rsfile)
return path
obj = BackupRules()
| antiX-Linux/gufw-jessie | gui-ufw-12.10.0/gufw/view/BackupRules.py | Python | gpl-3.0 | 3,113 |
import contextlib
import os
import sys
import unittest
from pikos.runner import get_function, get_focused_on
def module_function():
pass
class DummyClass(object):
def method(self):
pass
class TestRunner(unittest.TestCase):
def test_get_module_level_function(self):
function = get_function('pikos.tests.test_runner.module_function')
self.assertEqual(function.func_code, module_function.func_code)
def test_get_class_level_function(self):
function = get_function(
'pikos.tests.test_runner.DummyClass.method')
self.assertEqual(function.func_code, DummyClass.method.func_code)
def test_focused_on_script_method(self):
filename = self._script_file()
with self._python_path(filename):
functions = get_focused_on(filename, 'module_function')
self.assertEqual(len(functions), 1)
function = functions[0]
self.assertEqual(function.func_code, module_function.func_code)
def test_get_focused_on_script_class_method(self):
filename = self._script_file()
with self._python_path(filename):
functions = get_focused_on(filename, 'DummyClass.method')
self.assertEqual(len(functions), 1)
function = functions[0]
self.assertEqual(function.func_code, DummyClass.method.func_code)
def test_get_focused_with_multiple_functions(self):
filename = self._script_file()
with self._python_path(filename):
functions = get_focused_on(
filename, 'module_function, DummyClass.method')
self.assertEqual(len(functions), 2)
self.assertEqual(
[functions[0].func_code, functions[1].func_code],
[module_function.func_code, DummyClass.method.func_code])
def _script_file(self):
module_file = os.path.splitext(__file__)[0]
return '.'.join((module_file, 'py'))
@contextlib.contextmanager
def _python_path(self, path):
        directory = os.path.dirname(path)
        sys.path.insert(0, directory)
        try:
            yield
        finally:
            sys.path.remove(directory)
if __name__ == '__main__':
unittest.main()
| enthought/pikos | pikos/tests/test_runner.py | Python | bsd-3-clause | 2,188 |
from django import http
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django_statsd.clients import statsd
boomerang = {
'window.performance.navigation.redirectCount': 'nt_red_cnt',
'window.performance.navigation.type': 'nt_nav_type',
'window.performance.timing.connectEnd': 'nt_con_end',
'window.performance.timing.connectStart': 'nt_con_st',
'window.performance.timing.domComplete': 'nt_domcomp',
'window.performance.timing.domContentLoaded': 'nt_domcontloaded',
'window.performance.timing.domInteractive': 'nt_domint',
'window.performance.timing.domLoading': 'nt_domloading',
'window.performance.timing.domainLookupEnd': 'nt_dns_end',
'window.performance.timing.domainLookupStart': 'nt_dns_st',
'window.performance.timing.fetchStart': 'nt_fet_st',
'window.performance.timing.loadEventEnd': 'nt_load_end',
'window.performance.timing.loadEventStart': 'nt_load_st',
'window.performance.timing.navigationStart': 'nt_nav_st',
'window.performance.timing.redirectEnd': 'nt_red_end',
'window.performance.timing.redirectStart': 'nt_red_st',
'window.performance.timing.requestStart': 'nt_req_st',
'window.performance.timing.responseEnd': 'nt_res_end',
'window.performance.timing.responseStart': 'nt_res_st',
'window.performance.timing.unloadEventEnd': 'nt_unload_end',
'window.performance.timing.unloadEventStart': 'nt_unload_st'
}
types = {
'0': 'navigate',
'1': 'reload',
'2': 'back_forward',
'255': 'reserved'
}
# These are the default keys that we will try and record.
stick_keys = [
'window.performance.timing.domComplete',
'window.performance.timing.domInteractive',
'window.performance.timing.domLoading',
'window.performance.timing.loadEventEnd',
'window.performance.timing.responseStart',
'window.performance.navigation.redirectCount',
'window.performance.navigation.type',
]
def process_key(start, key, value):
if 'timing' in key:
# Some values will be zero. We want the output of that to
# be zero relative to start.
value = max(start, int(value)) - start
statsd.timing(key, value)
elif key == 'window.performance.navigation.type':
statsd.incr('%s.%s' % (key, types[value]))
elif key == 'window.performance.navigation.redirectCount':
statsd.incr(key, int(value))
def _process_summaries(start, keys):
calculated = {
'network': keys['window.performance.timing.responseStart'] - start,
'app': keys['window.performance.timing.domLoading'] -
keys['window.performance.timing.responseStart'],
'dom': keys['window.performance.timing.domComplete'] -
keys['window.performance.timing.domLoading'],
'rendering': keys['window.performance.timing.loadEventEnd'] -
keys['window.performance.timing.domComplete'],
}
for k, v in calculated.items():
# If loadEventEnd still does not get populated, we could end up with
# negative numbers here.
statsd.timing('window.performance.calculated.%s' % k, max(v, 0))
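# Worked example (numbers invented for illustration): with navigationStart at
# 1000, responseStart 1150, domLoading 1200, domComplete 1800 and loadEventEnd
# 1900 (all in ms), the summaries above come out as network=150, app=50,
# dom=600 and rendering=100.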
@require_http_methods(['GET', 'HEAD'])
def _process_boomerang(request):
if 'nt_nav_st' not in request.GET:
raise ValueError('nt_nav_st not in request.GET, make sure boomerang'
' is made with navigation API timings as per the following'
' http://yahoo.github.com/boomerang/doc/howtos/howto-9.html')
# This when the request started, everything else will be relative to this
# for the purposes of statsd measurement.
start = int(request.GET['nt_nav_st'])
keys = {}
for k in getattr(settings, 'STATSD_RECORD_KEYS', stick_keys):
v = request.GET.get(boomerang[k])
if not v or v == 'undefined':
continue
if k in boomerang:
process_key(start, k, v)
keys[k] = v
try:
_process_summaries(start, keys)
except KeyError:
pass
@require_http_methods(['POST'])
def _process_stick(request):
start = request.POST.get('window.performance.timing.navigationStart', None)
if not start:
return http.HttpResponseBadRequest()
start = int(start)
keys = {}
for k in getattr(settings, 'STATSD_RECORD_KEYS', stick_keys):
v = request.POST.get(k, None)
if v:
keys[k] = int(request.POST[k])
process_key(start, k, request.POST[k])
# Only process the network when we have these.
for key in ['window.performance.timing.loadEventEnd',
'window.performance.timing.responseStart']:
if key not in keys:
return
_process_summaries(start, keys)
clients = {
'boomerang': _process_boomerang,
'stick': _process_stick,
}
@csrf_exempt
def record(request):
"""
    This is a Django method you can link to in your URLs that processes
    the incoming data. Be sure to add a client parameter to your request
so that we can figure out how to process this request. For example
if you are using boomerang, you'll need: client = boomerang.
You can define a method in STATSD_RECORD_GUARD that will do any lookup
you need for imposing security on this method, so that not just anyone
can post to it.
"""
if 'client' not in request.REQUEST:
return http.HttpResponseBadRequest()
client = request.REQUEST['client']
if client not in clients:
return http.HttpResponseBadRequest()
guard = getattr(settings, 'STATSD_RECORD_GUARD', None)
if guard:
if not callable(guard):
raise ValueError('STATSD_RECORD_GUARD must be callable')
result = guard(request)
if result:
return result
try:
response = clients[client](request)
except (ValueError, KeyError):
return http.HttpResponseBadRequest()
if response:
return response
return http.HttpResponse('recorded')
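# Hooking the view up, as the docstring above describes, only needs a URL
# entry; this fragment is illustrative and the URL pattern is an assumption.
#
#     # urls.py
#     from django.conf.urls import url
#     from django_statsd import views
#
#     urlpatterns = [
#         url(r'^services/timing/record$', views.record, name='statsd_record'),
#     ]
#
# The client then calls it with ?client=boomerang (GET) or client=stick (POST).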
| AdrianGaudebert/socorro-crashstats | vendor-local/lib/python/django_statsd/views.py | Python | mpl-2.0 | 5,934 |
# -*- coding: utf-8 -*-
#
# Electrum documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 18 14:24:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Electrum'
copyright = u'2015, Thomas Voegtlin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5'
# The full version, including alpha/beta/rc tags.
release = '2.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Electrumdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Electrum.tex', u'Electrum Documentation',
u'Thomas Voegtlin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'electrum', u'Electrum Documentation',
[u'Thomas Voegtlin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Electrum', u'Electrum Documentation',
u'Thomas Voegtlin', 'Electrum', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| joelstanner/electrum-docs | conf.py | Python | mit | 8,955 |
import asyncio
import sys
from urllib.parse import urlsplit
from aiohttp.web import Response, WebSocketResponse
import six
def create_route(app, engineio_server, engineio_endpoint):
"""This function sets up the engine.io endpoint as a route for the
application.
Note that both GET and POST requests must be hooked up on the engine.io
endpoint.
"""
app.router.add_get(engineio_endpoint, engineio_server.handle_request)
app.router.add_post(engineio_endpoint, engineio_server.handle_request)
app.router.add_route('OPTIONS', engineio_endpoint,
engineio_server.handle_request)
def translate_request(request):
"""This function takes the arguments passed to the request handler and
uses them to generate a WSGI compatible environ dictionary.
"""
message = request._message
payload = request._payload
uri_parts = urlsplit(message.path)
environ = {
'wsgi.input': payload,
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.async': True,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'SERVER_SOFTWARE': 'aiohttp',
'REQUEST_METHOD': message.method,
'QUERY_STRING': uri_parts.query or '',
'RAW_URI': message.path,
'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version,
'REMOTE_ADDR': '127.0.0.1',
'REMOTE_PORT': '0',
'SERVER_NAME': 'aiohttp',
'SERVER_PORT': '0',
'aiohttp.request': request
}
for hdr_name, hdr_value in message.headers.items():
hdr_name = hdr_name.upper()
if hdr_name == 'CONTENT-TYPE':
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == 'CONTENT-LENGTH':
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_%s' % hdr_name.replace('-', '_')
if key in environ:
hdr_value = '%s,%s' % (environ[key], hdr_value)
environ[key] = hdr_value
environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
path_info = uri_parts.path
environ['PATH_INFO'] = path_info
environ['SCRIPT_NAME'] = ''
return environ
def make_response(status, headers, payload, environ):
"""This function generates an appropriate response object for this async
mode.
"""
return Response(body=payload, status=int(status.split()[0]),
headers=headers)
class WebSocket(object): # pragma: no cover
"""
    This wrapper class provides an aiohttp WebSocket interface that is
somewhat compatible with eventlet's implementation.
"""
def __init__(self, handler):
self.handler = handler
self._sock = None
async def __call__(self, environ):
request = environ['aiohttp.request']
self._sock = WebSocketResponse()
await self._sock.prepare(request)
self.environ = environ
await self.handler(self)
return self._sock
async def close(self):
await self._sock.close()
async def send(self, message):
if isinstance(message, bytes):
f = self._sock.send_bytes
else:
f = self._sock.send_str
if asyncio.iscoroutinefunction(f):
await f(message)
else:
f(message)
async def wait(self):
msg = await self._sock.receive()
if not isinstance(msg.data, six.binary_type) and \
not isinstance(msg.data, six.text_type):
raise IOError()
return msg.data
_async = {
'asyncio': True,
'create_route': create_route,
'translate_request': translate_request,
'make_response': make_response,
'websocket': WebSocket,
}
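# Rough wiring sketch (not part of this module): an aiohttp application picks
# these hooks up through create_route(). The server object and endpoint path
# below are assumptions made for illustration.
#
#     import engineio
#     from aiohttp import web
#
#     eio = engineio.AsyncServer(async_mode='aiohttp')
#     app = web.Application()
#     create_route(app, eio, '/engine.io/')
#     web.run_app(app)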
| max00xam/service.maxxam.teamwatch | lib/engineio/async_drivers/aiohttp.py | Python | gpl-3.0 | 3,777 |
##############################################################################
#
# Swiss localization Direct Debit module for OpenERP
# Copyright (C) 2014 Compassion (http://www.compassion.ch)
# @author: Cyril Sester <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, _, netsvc, exceptions
class invoice(models.Model):
''' Inherit invoice to add invoice freeing functionality. It's about
        moving the related payment lines into a new cancelled payment order.
        This way, the invoice (more precisely, its move lines) can be used again
in another payment order.
'''
_inherit = 'account.invoice'
@api.multi
def cancel_payment_lines(self):
        ''' This function simply finds the related payment lines and moves them
            into a new payment order.
'''
mov_line_obj = self.env['account.move.line']
pay_line_obj = self.env['payment.line']
pay_order_obj = self.env['payment.order']
active_ids = self.env.context.get('active_ids')
move_ids = self.browse(active_ids).mapped('move_id.id')
move_line_ids = mov_line_obj.search([('move_id', 'in', move_ids)]).ids
pay_lines = pay_line_obj.search([('move_line_id',
'in', move_line_ids)])
if not pay_lines:
raise exceptions.Warning(_('No payment line found !'))
old_pay_order = pay_lines[0].order_id
vals = {
'date_created': old_pay_order.date_created,
'date_prefered': old_pay_order.date_prefered,
'payment_order_type': old_pay_order.payment_order_type,
'mode': old_pay_order.mode.id,
}
pay_order = pay_order_obj.create(vals)
wf_service = netsvc.LocalService('workflow')
wf_service.trg_validate(self.env.uid, 'payment.order',
pay_order.id, 'cancel', self.env.cr)
pay_lines.write({'order_id': pay_order.id})
return pay_order
class account_invoice_free(models.TransientModel):
    ''' Wizard to free invoices. When the job is done, the user is redirected
        to the new payment order.
'''
_name = 'account.invoice.free'
@api.multi
def invoice_free(self):
inv_obj = self.env['account.invoice']
order = inv_obj.cancel_payment_lines()
action = {
'name': 'Payment order',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form, tree',
'res_model': 'payment.order',
'res_id': order.id,
'target': 'current',
}
return action
| ndtran/l10n-switzerland | l10n_ch_lsv_dd/model/invoice.py | Python | agpl-3.0 | 3,491 |
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.curdir, 'beta-issues')))
from flask import Flask, render_template, flash, request, url_for, redirect, session, abort, Markup
from werkzeug.utils import secure_filename
from functools import wraps
from wtforms import Form, TextField, PasswordField, BooleanField, validators
from passlib.hash import sha256_crypt
import datetime
import gc
from models import Issues, User
from database import db
from sqlalchemy import func
# User scripts
sys.path.append(os.path.abspath(os.path.join(os.path.curdir, 'beta-issues', 'static', 'scripts')))
from beta_pdf_miner import get_issues_list
app = Flask(__name__)
app.config.from_object(__name__) # load config from this file, flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'mydb.db'),
SCHEMA=os.path.join(app.root_path, 'schema.sql'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
UPLOAD_FOLDER=os.path.join(app.root_path, 'static', 'upload'),
SQLALCHEMY_DATABASE_URI='sqlite:///database.db',
SQLALCHEMY_TRACK_MODIFICATIONS=False,
))
db.init_app(app)
with app.app_context():
db.create_all()
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if "logged_in" in session:
return f(*args, **kwargs)
else:
flash("You need to login first")
return redirect(url_for('login'))
return wrap
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html', error=e)
@app.route('/')
@app.route('/index/')
def homepage():
return render_template('main.html')
@app.route('/help/')
def help_page():
return render_template('help.html')
@app.route('/issue_status/')
def issue_status():
resolved = Issues.query.filter_by(evektor=True, resolved=True).all()
unresolved = Issues.query.filter_by(evektor=True, resolved=False).all()
issues = {'resolved': resolved, 'unresolved': unresolved}
today = datetime.datetime.today().strftime('%Y-%m-%d')
return render_template('issue_status.html', today=today, issues=issues)
@app.route('/add_issue/', methods=['GET', 'POST'])
def add_issue():
if not session.get('logged_in'):
abort(401)
issue = request.form.get('issue').strip()
description = request.form.get('description')
date_issued = request.form.get('date_issued').strip()
author = request.form.get('author').strip()
details = request.form.get('details')
issue_found = Issues.query.filter_by(issue=issue).all()
# Issue not in DB, add it
if len(issue_found) == 0:
print("DEBUG", "len(issue_found) == 0", "issue_found", issue_found)
issue_add = Issues(
issue=issue,
evektor=True,
description=description,
date_issued=date_issued,
author=author,
details=details,
)
db.session.add(issue_add)
# Issue found in DB
else:
print("DEBUG", "ELSE", "issue_found", issue_found)
q = Issues.query.filter_by(issue=issue, resolved=True).one_or_none()
# Issue is already tagged as RESOLVED, update other attributes
if q:
msg = Markup("<p><strong>This issue is already resolved!</strong></p>"
"<p>Version: {}</p>"
"<p>Date resolved: {}</p>".format(q.version, q.date_resolved))
flash(msg, 'success')
Issues.query.filter_by(issue=issue).update(dict(
evektor=True,
description=description,
date_issued=date_issued,
author=author,
details=details,
))
# Issue is UNRESOLVED, user tries to add existing issue
else:
flash("Issue <{}> already exists...".format(issue), 'danger')
return redirect(url_for('issue_status'))
db.session.commit()
flash('New issue was successfully added into database.', 'success')
return redirect(url_for('issue_status'))
@app.route('/edit_issue/', methods=['GET', 'POST'])
def edit_issue():
if not session.get('logged_in'):
abort(401)
if request.method == 'POST' and request.form.get('submit') == 'details':
sys.exit()
orig_issue = request.form.get('orig_issue')
res = Issues.query.filter_by(issue=orig_issue).all()
if len(res) == 0:
flash("This issue was not found in database... Weird...", 'danger')
return redirect(url_for('issue_status'))
Issues.query.filter_by(issue=orig_issue).update(dict(
issue=request.form.get('issue'),
description=request.form.get('description'),
date_issued=request.form.get('date_issued'),
author=request.form.get('author'),
details=request.form.get('details'),
))
db.session.commit()
flash("Issue: [{}] was modified successfully.".format(request.form.get('issue')), 'success')
return redirect(url_for('issue_status'))
@app.route('/upload_release_changes/')
@login_required
def upload_release_changes():
data = db.session.query(Issues.version, func.count('issue')).group_by('version').all()
data = [(version, count) for (version, count) in data if version is not None] # [('None', 1), ('ANSA v...')]
versions = sorted(data, reverse=True) # [('META v17.0.1', '46'), ('ANSA v17.1.0', '86'), ...]
resolved_db = Issues.query.filter_by(resolved=True).all() # return all issues with attr resolved = True
return render_template('upload_release_changes.html', resolved_db=resolved_db, versions=versions)
@app.route('/upload_release_changes/', methods=['GET', 'POST'])
def upload_changes():
file_list = request.files.getlist('UploadFiles')
if file_list[0].filename == '':
flash('No file(s) selected', 'warning')
return redirect(url_for('upload_release_changes'))
issues = []
for file in file_list:
filename = secure_filename(file.filename)
filepath = os.path.join(app.config.get('UPLOAD_FOLDER'), filename)
file.save(filepath)
if 'ansa' in filename.lower() and 'release_notes' in filename.lower():
issues.extend(get_issues_list(filepath, 'Ansa'))
elif 'meta' in filename.lower() and 'release_notes' in filename.lower():
issues.extend(get_issues_list(filepath, 'Meta'))
else:
flash("Can't recognize this file: {}. I has to have 'Release Notes' and ANSA or META in it's name...".format(
filename), 'danger')
return redirect(request.url)
os.remove(filepath)
# Iterate over found issues in PDF
count = 0
for issue_tuple in issues:
issue, version = issue_tuple
issue_in_db = Issues.query.filter_by(issue=issue).all()
# Not in DB: add name, version, date resolved, resolved
if len(issue_in_db) == 0:
# flash("ADDING Issue: [{}]".format(issue))
issue_add = Issues(
issue=issue,
version=version,
date_resolved=datetime.datetime.today().strftime('%Y-%m-%d'),
resolved=True,
)
db.session.add(issue_add)
count += 1
# In DB
else:
issue_resolved = Issues.query.filter_by(issue=issue, resolved=True).all()
# Already uploaded from PDF, not unresolved issue, ignore
if len(issue_resolved) == 1:
pass
# flash("Already there: Issue: {}, Version: {}".format(issue, version))
# Not uploaded from PDF, update version, date resolved, resolved to True
else:
q = Issues.query.filter_by(issue=issue)
subq = q.with_entities(Issues.issue, Issues.description).one()
issue_num, description = subq
msg = Markup("<p><strong>Our issue resolved!</strong></p>"
"<p>Issue: {}</p>"
"<p>Description: {}</p>".format(issue_num, description))
flash(msg, 'success')
Issues.query.filter_by(issue=issue).update(dict(
version=version,
date_resolved=datetime.datetime.today().strftime('%Y-%m-%d'),
resolved=True,
))
count += 1
db.session.commit()
msg = Markup("<p>{} resolved issues found in selected document(s).</p>"
"<p><strong>{}</strong> new issues added to database.</p>".format(len(issues), count))
flash(msg, 'success')
return redirect(url_for('upload_release_changes'))
@app.route('/login/', methods=['GET', 'POST']) # /?variable=this (post)
def login():
error = None
try:
if request.method == 'POST':
form_username = request.form.get('username')
form_password = request.form.get('password')
q = User.query.filter_by(username=form_username).one_or_none()
if q and sha256_crypt.verify(form_password, q.password):
session['logged_in'] = True
session['username'] = q.username
flash("Your are now logged in", 'success')
return redirect(url_for('issue_status'))
else:
error = "Invalid credentials, try again."
return render_template('login.html', error=error)
except Exception as e:
error = "Invalid credentials, try again."
flash(e)
return render_template('login.html', error=error)
class RegistrationForm(Form):
username = TextField('Username', [validators.Length(min=4, max=20)])
email = TextField('Email Address', [validators.Length(min=6, max=50)])
password = PasswordField('Password', [validators.Required(),
validators.EqualTo('confirm', message="Passwords must match.")])
confirm = PasswordField('Repeat Password')
accept_tos = BooleanField('I accept the Terms of Service and the Privacy notice.',
[validators.Required()])
@app.route('/register/', methods=['GET', 'POST'])
def register_page():
try:
form = RegistrationForm(request.form)
if request.method == 'POST' and form.validate():
username = form.username.data
email = form.email.data
password = sha256_crypt.encrypt(str(form.password.data))
q = User.query.filter_by(username=username).all()
if len(q) != 0:
flash("That username is already taken, please choose another", 'danger')
return render_template('register.html', form=form)
user_add = User(username=username, password=password, email=email)
db.session.add(user_add)
db.session.commit()
flash("Thanks for registering.", 'success')
session['logged_in'] = True
session['username'] = username
return redirect(url_for('issue_status'))
return render_template('register.html', form=form)
except Exception as e:
return("Erra: {}".format(e))
@app.route('/logout/')
@login_required
def logout():
session.pop('logged_in', None)
flash("You have been logged out.", "info")
gc.collect()
return redirect(url_for('issue_status'))
@app.route('/issue_status/', methods=['GET', 'POST'])
def issue_modify():
issue = request.form.get('remove_issue')
q = Issues.query.filter_by(issue=issue).one()
if q.resolved:
Issues.query.filter_by(issue=issue).update(dict(
evektor=False,
description=None,
details=None,
author=None,
date_issued=None,
))
else:
db.session.delete(q)
db.session.commit()
flash("Issue: [{}] deleted.".format(q.issue), 'danger')
return redirect(url_for('issue_status'))
if __name__ == '__main__':
app.run(host='localhost', port=5001, debug=True)
| SonGokussj4/beta-issues | beta-issues/__init__.py | Python | mit | 12,056 |
#!/usr/bin/python3
#
# api_test.py: Test/demo of the python3-lxc API
#
# (C) Copyright Canonical Ltd. 2012
#
# Authors:
# Stéphane Graber <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import warnings
warnings.filterwarnings("ignore", "The python-lxc API isn't yet stable")
import lxc
import uuid
import sys
import time
# Some constants
LXC_TEMPLATE = "ubuntu"
# Let's pick a random name, avoiding clashes
CONTAINER_NAME = str(uuid.uuid1())
CLONE_NAME = str(uuid.uuid1())
## Instantiate the container instance
print("Getting instance for '%s'" % CONTAINER_NAME)
container = lxc.Container(CONTAINER_NAME)
# A few basic checks of the current state
assert(container.config_file_name == "%s/%s/config" %
(lxc.default_config_path, CONTAINER_NAME))
assert(not container.defined)
assert(container.init_pid == -1)
assert(container.name == CONTAINER_NAME)
assert(not container.running)
assert(container.state == "STOPPED")
## Create a rootfs
print("Creating rootfs using '%s'" % LXC_TEMPLATE)
container.create(LXC_TEMPLATE)
assert(container.defined)
assert(container.name == CONTAINER_NAME
== container.get_config_item("lxc.utsname"))
assert(container.name in lxc.list_containers())
## Test the config
print("Testing the configuration")
capdrop = container.get_config_item("lxc.cap.drop")
container.clear_config_item("lxc.cap.drop")
container.set_config_item("lxc.cap.drop", capdrop[:-1])
container.append_config_item("lxc.cap.drop", capdrop[-1])
container.save_config()
# A few basic checks of the current state
assert(isinstance(capdrop, list))
assert(capdrop == container.get_config_item("lxc.cap.drop"))
## Test the networking
print("Testing the networking")
# A few basic checks of the current state
assert("name" in container.get_keys("lxc.network.0"))
assert(len(container.network) == 1)
assert(container.network[0].hwaddr.startswith("00:16:3e"))
## Starting the container
print("Starting the container")
container.start()
container.wait("RUNNING", 3)
# A few basic checks of the current state
assert(container.init_pid > 1)
assert(container.running)
assert(container.state == "RUNNING")
## Checking IP address
print("Getting the IP addresses")
count = 0
ips = []
while not ips and count < 10:
ips = container.get_ips()
time.sleep(1)
count += 1
container.attach("NETWORK|UTSNAME", "/sbin/ifconfig", "eth0")
# A few basic checks of the current state
assert(len(ips) > 0)
## Testing cgroups a bit
print("Testing cgroup API")
max_mem = container.get_cgroup_item("memory.max_usage_in_bytes")
current_limit = container.get_cgroup_item("memory.limit_in_bytes")
assert(container.set_cgroup_item("memory.limit_in_bytes", max_mem))
assert(container.get_cgroup_item("memory.limit_in_bytes") != current_limit)
## Freezing the container
print("Freezing the container")
container.freeze()
container.wait("FROZEN", 3)
# A few basic checks of the current state
assert(container.init_pid > 1)
assert(container.running)
assert(container.state == "FROZEN")
## Unfreezing the container
print("Unfreezing the container")
container.unfreeze()
container.wait("RUNNING", 3)
# A few basic checks of the current state
assert(container.init_pid > 1)
assert(container.running)
assert(container.state == "RUNNING")
if len(sys.argv) > 1 and sys.argv[1] == "--with-console":
## Attaching to tty1
print("Attaching to tty1")
container.console(tty=1)
## Shutting down the container
print("Shutting down the container")
container.shutdown(3)
if container.running:
print("Stopping the container")
container.stop()
container.wait("STOPPED", 3)
# A few basic checks of the current state
assert(container.init_pid == -1)
assert(not container.running)
assert(container.state == "STOPPED")
## Cloning the container
print("Cloning the container")
clone = lxc.Container(CLONE_NAME)
clone.clone(container)
clone.start()
clone.stop()
clone.destroy()
## Destroy the container
print("Destroying the container")
container.destroy()
assert(not container.defined)
| smoser/lxc | src/python-lxc/examples/api_test.py | Python | lgpl-2.1 | 4,693 |
"""
Australian-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.au.au_states import STATE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
""" Australian post code field.
Assumed to be 4 digits.
Northern Territory 3-digit postcodes should have leading zero.
"""
default_error_messages = {
'invalid': _('Enter a 4 digit postcode.'),
}
def __init__(self, max_length=4, min_length=None, *args, **kwargs):
super(AUPostCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class AUPhoneNumberField(Field):
"""Australian phone number field."""
default_error_messages = {
'invalid': 'Phone numbers must contain 10 digits.',
}
def clean(self, value):
"""
Validate a phone number. Strips parentheses, whitespace and hyphens.
"""
super(AUPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+|-)', '', smart_unicode(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return '%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
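    # Illustrative examples (not from the original module): "(02) 9123-4567"
    # and "02 9123 4567" both clean to "0291234567"; any value that does not
    # reduce to exactly 10 digits raises ValidationError.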
class AUStateSelect(Select):
"""
A Select widget that uses a list of Australian states/territories as its
choices.
"""
def __init__(self, attrs=None):
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
| rebost/django | django/contrib/localflavor/au/forms.py | Python | bsd-3-clause | 1,832 |
'''
Created on Apr 26, 2017
@author: dj
'''
from concurrent.futures import ProcessPoolExecutor
from time import time
def gcd(pair):
a, b = pair
low = min(a, b)
for i in range(low, 0, -1):
if (a % i == 0) and (b % i == 0):
return i
numbers = [(1963309, 2265973), (2030677, 3814172),
(1551645, 2229620), (2039045, 2020802)]
print("-" * 40)
print("Run in ProcessPoolExecutor.")
start = time()
pool = ProcessPoolExecutor(max_workers=2)
results = list(pool.map(gcd, numbers))
stop = time()
print("results =", results)
print("duration time (s) =", stop - start)
# Currently, there is error:
# concurrent.futures.process.BrokenProcessPool: A process in the process
# pool was terminated abruptly while the future was running or pending.
if __name__ == '__main__':
pass
| djsilenceboy/LearnTest | Python_Test/PySample1/com/djs/learn/concurrency/TestProcess2.py | Python | apache-2.0 | 819 |
#coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
"""
Logistic regression
Solved with gradient descent on the cross-entropy error function
"""
def plotData(X, y):
    # Indices of the positive-class samples
positive = [i for i in range(len(y)) if y[i] == 1]
    # Indices of the negative-class samples
negative = [i for i in range(len(y)) if y[i] == 0]
plt.scatter(X[positive, 0], X[positive, 1], c='red', marker='o', label="positive")
plt.scatter(X[negative, 0], X[negative, 1], c='blue', marker='o', label="negative")
def sigmoid(z):
return 1.0 / (1 + np.exp(-z))
def safe_log(x, minval=0.0000000001):
return np.log(x.clip(min=minval))
def computeCost(X, y, theta):
    # Use the cross-entropy error function rather than the squared-error function
h = sigmoid(np.dot(X, theta))
J = (1.0 / m) * np.sum(-y * safe_log(h) - (1 - y) * safe_log(1 - h))
return J
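# Reference formulas matching the code (with h = sigmoid(np.dot(X, theta))):
#   J(theta)      = -(1/m) * sum( y*log(h) + (1-y)*log(1-h) )
#   grad J(theta) =  (1/m) * np.dot(X.T, h - y)
# Each gradient-descent step below is theta <- theta - alpha * grad J(theta).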
def gradientDescent(X, y, theta, alpha, iterations):
    m = len(y)  # number of training samples
    J_history = []  # cost after each update
for iter in range(iterations):
        # Unlike linear regression, the sigmoid is applied to the linear output
h = sigmoid(np.dot(X, theta))
theta = theta - alpha * (1.0 / m) * np.dot(X.T, h - y)
cost = computeCost(X, y, theta)
print iter, cost
J_history.append(cost)
return theta, J_history
if __name__ == "__main__":
    # Load the training data
data = np.genfromtxt("ex2data1.txt", delimiter=",")
X = data[:, (0, 1)]
y = data[:, 2]
    # Number of training samples
m = len(y)
    # Plot the training data
plt.figure(1)
plotData(X, y)
    # Prepend a column of ones (bias term) to the training data
X = X.reshape((m, 2))
X = np.hstack((np.ones((m, 1)), X))
    # Initialize the parameters to zero
theta = np.zeros(3)
iterations = 300000
alpha = 0.001
    # Compute the initial cost
initialCost = computeCost(X, y, theta)
print "initial cost:", initialCost
    # Estimate the parameters with gradient descent
theta, J_history = gradientDescent(X, y, theta, alpha, iterations)
print "theta:", theta
print "final cost:", J_history[-1]
    # Plot the cost history
plt.figure(2)
plt.plot(J_history)
plt.xlabel("iteration")
plt.ylabel("J(theta)")
    # Draw the decision boundary
plt.figure(1)
xmin, xmax = min(X[:,1]), max(X[:,1])
xs = np.linspace(xmin, xmax, 100)
ys = [- (theta[0] / theta[2]) - (theta[1] / theta[2]) * x for x in xs]
plt.plot(xs, ys, 'b-', label="decision boundary")
plt.xlabel("x1")
plt.ylabel("x2")
plt.xlim((30, 100))
plt.ylim((30, 100))
plt.legend()
plt.show()
| TenninYan/Perceptron | ch4/logistic_regression.py | Python | mit | 2,738 |
#!/usr/bin/python
import serial
ser = serial.Serial('COM9', 9600)
ser.write(b'5~')
ser.close()
| GeoffSpielman/Hackathin_Examples | Python_To_Arduino_Communication/Python_To_Arduino_Communication.py | Python | mit | 96 |
from .base import BaseConfig
__all__ = ["ImmunizationSite"]
class ImmunizationSite(BaseConfig):
@classmethod
def build_fhir_object_from_health(cls, health_site):
# Health equivalents
# ('lvl', 'left vastus lateralis'),
# ('rvl', 'right vastus lateralis'),
# ('ld', 'left deltoid'),
# ('rd', 'right deltoid'),
# ('lalt', 'left anterolateral fat of thigh'),
# ('ralt', 'right anterolateral fat of thigh'),
# ('lpua', 'left posterolateral fat of upper arm'),
# ('rpua', 'right posterolateral fat of upper arm'),
# ('lfa', 'left fore arm'),
# ('rfa', 'right fore arm')
# The FHIR value set is limited -- use Health's
if health_site == "lvl":
return cls.get_fhir_lvl()
elif health_site == "rvl":
return cls.get_fhir_rvl()
elif health_site == "ld":
return cls.get_fhir_ld()
elif health_site == "rd":
return cls.get_fhir_rd()
elif health_site == "lalt":
return cls.get_fhir_lalt()
elif health_site == "ralt":
return cls.get_fhir_ralt()
elif health_site == "lpua":
return cls.get_fhir_lpua()
elif health_site == "rpua":
return cls.get_fhir_rpua()
elif health_site == "lfa":
return cls.get_fhir_lfa()
elif health_site == "rfa":
return cls.get_fhir_rfa()
else:
return None
@classmethod
def build_health_object_from_fhir(cls, fhir_site):
if fhir_site in [
"LVL",
"RVL",
"LD",
"RD",
"LALT",
"RALT",
"LPUA",
"RPUA",
"LFA",
"RFA",
]:
return fhir_site.lower()
else:
return None
@classmethod
def get_fhir_lvl(cls):
return cls.build_codeable_concept(code="LVL", text="left vastus lateralis")
@classmethod
def get_fhir_rvl(cls):
return cls.build_codeable_concept(code="RVL", text="right vastus lateralis")
@classmethod
def get_fhir_ld(cls):
return cls.build_codeable_concept(code="LD", text="left deltoid")
@classmethod
def get_fhir_rd(cls):
return cls.build_codeable_concept(code="RD", text="right deltoid")
@classmethod
def get_fhir_lalt(cls):
return cls.build_codeable_concept(
code="LALT", text="left anterolateral fat of thigh"
)
@classmethod
def get_fhir_ralt(cls):
return cls.build_codeable_concept(
code="RALT", text="right anterolateral fat of thigh"
)
@classmethod
def get_fhir_lpua(cls):
return cls.build_codeable_concept(
code="LPUA", text="left posterolateral fat of thigh"
)
@classmethod
def get_fhir_rpua(cls):
return cls.build_codeable_concept(
code="RPUA", text="right posterolateral fat of thigh"
)
@classmethod
def get_fhir_lfa(cls):
return cls.build_codeable_concept(code="LFA", text="left forearm")
@classmethod
def get_fhir_rfa(cls):
return cls.build_codeable_concept(code="RFA", text="right forearm")
| teffalump/health_fhir | gnu_health_fhir/config/converters/config_immunization_site.py | Python | gpl-3.0 | 3,260 |
class C:
def __init__(self):
self.foo = 42
def method(foo, x):
print(foo)
method(C().foo, 1)
| hurricup/intellij-community | python/testData/refactoring/makeFunctionTopLevel/methodSingleAttributeRead.after.py | Python | apache-2.0 | 113 |
# -*- coding: utf-8 -*-
"""
Django settings for overlord project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('overlord')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework', # django rest framework
)
# Apps specific for this project go here.
LOCAL_APPS = (
'overlord.core',
'overlord.users', # custom users app
'overlord.minions',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'overlord.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Farfor""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///overlord"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your common stuff: Below this line define 3rd party library settings
REST_FRAMEWORK = {
'PAGE_SIZE': 10
}
| Farforr/overlord | config/settings/common.py | Python | bsd-3-clause | 9,673 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-07 05:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0039_trainingrequest_cert_status'),
]
operations = [
migrations.CreateModel(
name='topperlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('emailid', models.EmailField(max_length=100)),
('userid', models.PositiveIntegerField()),
],
),
]
| Spoken-tutorial/spoken-website | events/migrations/0040_topperlist.py | Python | gpl-3.0 | 652 |
# -*- coding: utf-8 -*-
# Copyright 2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from itertools import chain
import numpy as np
import scipy.linalg
from hyperspy.misc.machine_learning.import_sklearn import (
fast_svd, sklearn_installed)
from hyperspy.external.progressbar import progressbar
_logger = logging.getLogger(__name__)
def _thresh(X, lambda1):
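    # Element-wise soft-thresholding (shrinkage) operator:
    #   _thresh(X, lambda1) = sign(X) * max(|X| - lambda1, 0)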
res = np.abs(X) - lambda1
return np.sign(X) * ((res > 0) * res)
def rpca_godec(X, rank, fast=False, lambda1=None,
power=None, tol=None, maxiter=None):
"""
This function performs Robust PCA with missing or corrupted data,
using the GoDec algorithm.
Parameters
----------
X : numpy array
is the [nfeatures x nsamples] matrix of observations.
rank : int
The model dimensionality.
lambda1 : None | float
Regularization parameter.
If None, set to 1 / sqrt(nsamples)
power : None | integer
The number of power iterations used in the initialization
If None, set to 0 for speed
tol : None | float
Convergence tolerance
If None, set to 1e-3
maxiter : None | integer
Maximum number of iterations
If None, set to 1e3
Returns
-------
Xhat : numpy array
is the [nfeatures x nsamples] low-rank matrix
Ehat : numpy array
is the [nfeatures x nsamples] sparse error matrix
Ghat : numpy array
is the [nfeatures x nsamples] Gaussian noise matrix
U, S, V : numpy arrays
are the results of an SVD on Xhat
Notes
-----
Algorithm based on the following research paper:
Tianyi Zhou and Dacheng Tao, "GoDec: Randomized Low-rank & Sparse Matrix
Decomposition in Noisy Case", ICML-11, (2011), pp. 33-40.
Code: https://sites.google.com/site/godecomposition/matrix/artifact-1
"""
if fast is True and sklearn_installed is True:
def svd(X):
return fast_svd(X, rank)
else:
def svd(X):
return scipy.linalg.svd(X, full_matrices=False)
# Get shape
m, n = X.shape
# Operate on transposed matrix for speed
transpose = False
if m < n:
transpose = True
X = X.T
# Get shape
m, n = X.shape
# Check options if None
if lambda1 is None:
_logger.warning("Threshold 'lambda1' is set to "
"default: 1 / sqrt(nsamples)")
lambda1 = 1.0 / np.sqrt(n)
if power is None:
_logger.warning("Number of power iterations not specified. "
"Defaulting to 0")
power = 0
if tol is None:
_logger.warning("Convergence tolerance not specifed. "
"Defaulting to 1e-3")
tol = 1e-3
if maxiter is None:
_logger.warning("Maximum iterations not specified. "
"Defaulting to 1e3")
maxiter = 1e3
# Initialize L and E
L = X
E = np.zeros(L.shape)
itr = 0
while True:
itr += 1
# Initialization with bilateral random projections
Y2 = np.random.randn(n, rank)
for i in range(power + 1):
Y2 = np.dot(L.T, np.dot(L, Y2))
Q, tmp = scipy.linalg.qr(Y2, mode='economic')
# Estimate the new low-rank and sparse matrices
Lnew = np.dot(np.dot(L, Q), Q.T)
A = L - Lnew + E
L = Lnew
E = _thresh(A, lambda1)
A -= E
L += A
# Check convergence
eps = np.linalg.norm(A)
if (eps < tol):
_logger.info("Converged to %f in %d iterations" % (eps, itr))
break
elif (itr >= maxiter):
_logger.warning("Maximum iterations reached")
break
# Get the remaining Gaussian noise matrix
G = X - L - E
# Transpose back
if transpose:
L = L.T
E = E.T
G = G.T
# Rescale
Xhat = L
Ehat = E
Ghat = G
# Do final SVD
U, S, Vh = svd(Xhat)
V = Vh.T
# Chop small singular values which
# likely arise from numerical noise
# in the SVD.
S[rank:] = 0.
return Xhat, Ehat, Ghat, U, S, V
def _solveproj(z, X, I, lambda2):
m, n = X.shape
s = np.zeros(m)
x = np.zeros(n)
maxiter = 1e9
itr = 0
ddt = np.dot(scipy.linalg.inv(np.dot(X.T, X) + I), X.T)
while True:
itr += 1
xtmp = x
x = np.dot(ddt, (z - s))
stmp = s
s = np.maximum(z - np.dot(X, x) - lambda2, 0.0)
stopx = np.sqrt(np.dot(x - xtmp, (x - xtmp).conj()))
stops = np.sqrt(np.dot(s - stmp, (s - stmp).conj()))
stop = max(stopx, stops) / m
if stop < 1e-6 or itr > maxiter:
break
return x, s
def _updatecol(X, A, B, I):
tmp, n = X.shape
L = X
A = A + I
for i in range(n):
b = B[:, i]
x = X[:, i]
a = A[:, i]
temp = (b - np.dot(X, a)) / A[i, i] + x
L[:, i] = temp / max(np.sqrt(np.dot(temp, temp.conj())), 1)
return L
class ORPCA:
def __init__(self, rank, fast=False, lambda1=None, lambda2=None,
method=None, learning_rate=None, init=None,
training_samples=None, momentum=None):
self.nfeatures = None
self.normalize = False
if fast is True and sklearn_installed is True:
def svd(X):
return fast_svd(X, rank)
else:
def svd(X):
return scipy.linalg.svd(X, full_matrices=False)
self.svd = svd
self.t = 0
# Check options if None
if method is None:
_logger.warning("No method specified. Defaulting to "
"'CF' (closed-form solver)")
method = 'CF'
if init is None:
_logger.warning("No initialization specified. Defaulting to "
"'qr' initialization")
init = 'qr'
if training_samples is None:
if init == 'qr':
if rank >= 10:
training_samples = rank
else:
training_samples = 10
_logger.warning("Number of training samples for 'qr' method "
"not specified. Defaulting to %d samples" %
training_samples)
if learning_rate is None:
if method in ('SGD', 'MomentumSGD'):
_logger.warning("Learning rate for SGD algorithm is "
"set to default: 1.0")
learning_rate = 1.0
if momentum is None:
if method == 'MomentumSGD':
_logger.warning("Momentum parameter for SGD algorithm is "
"set to default: 0.5")
momentum = 0.5
self.rank = rank
self.lambda1 = lambda1
self.lambda2 = lambda2
self.method = method
self.init = init
self.training_samples = training_samples
self.learning_rate = learning_rate
self.momentum = momentum
# Check options are valid
if method not in ('CF', 'BCD', 'SGD', 'MomentumSGD'):
raise ValueError("'method' not recognised")
if not isinstance(init, np.ndarray) and init not in ('qr', 'rand'):
raise ValueError("'method' not recognised")
if init == 'qr' and training_samples < rank:
raise ValueError(
"'training_samples' must be >= 'output_dimension'")
if method == 'MomentumSGD' and (momentum > 1. or momentum < 0.):
raise ValueError("'momentum' must be a float between 0 and 1")
def _setup(self, X, normalize=False):
if isinstance(X, np.ndarray):
n, m = X.shape
iterating = False
else:
x = next(X)
m = len(x)
X = chain([x], X)
iterating = True
self.nfeatures = m
self.iterating = iterating
if self.lambda1 is None:
_logger.warning("Nuclear norm regularization parameter "
"is set to default: 1 / sqrt(nfeatures)")
self.lambda1 = 1.0 / np.sqrt(m)
if self.lambda2 is None:
_logger.warning("Sparse regularization parameter "
"is set to default: 1 / sqrt(nfeatures)")
self.lambda2 = 1.0 / np.sqrt(m)
self.L = self._initialize(X)
self.I = self.lambda1 * np.eye(self.rank)
self.R = []
self.E = []
# Extra variables for CF and BCD methods
if self.method in ('CF', 'BCD'):
self.A = np.zeros((self.rank, self.rank))
self.B = np.zeros((m, self.rank))
if self.method == 'MomentumSGD':
self.vnew = np.zeros_like(self.L)
return X
def _initialize(self, X):
m = self.nfeatures
iterating = self.iterating
# Initialize the subspace estimate
if self.init in ('qr', 'rand'):
if self.init == 'qr':
if iterating:
Y2 = np.stack([next(X) for _ in range(self.training_samples)],
axis=-1)
X = chain(iter(Y2.T.copy()), X)
else:
Y2 = X[:self.training_samples, :].T
elif self.init == 'rand':
Y2 = np.random.randn(m, self.rank)
L, _ = scipy.linalg.qr(Y2, mode='economic')
return L[:, :self.rank]
elif isinstance(self.init, np.ndarray):
            if self.init.ndim != 2:
                raise ValueError("'init' has to be a two-dimensional matrix")
            init_m, init_r = self.init.shape
            if init_m != m or init_r != self.rank:
                raise ValueError(
                    "'init' has to be of shape [nfeatures x rank]")
            return self.init.copy()
else:
raise ValueError('Bad initialization options')
def fit(self, X, iterating=None):
if self.nfeatures is None:
X = self._setup(X)
if iterating is None:
iterating = self.iterating
else:
self.iterating = iterating
num = None
if isinstance(X, np.ndarray):
num = X.shape[0]
X = iter(X)
for z in progressbar(X, leave=False, total=num):
if not self.t or not (self.t + 1) % 10:
_logger.info("Processing sample : %s" % (self.t + 1))
# TODO: what about z.min()?
thislambda2 = self.lambda2 # * z.max()
thislambda1 = self.lambda1 # * z.max()
r, e = _solveproj(z, self.L, self.I, thislambda2)
self.R.append(r)
if not iterating:
self.E.append(e)
if self.method == 'CF':
# Closed-form solution
self.A += np.outer(r, r.T)
self.B += np.outer((z - e), r.T)
self.L = np.dot(self.B, scipy.linalg.inv(self.A + self.I))
elif self.method == 'BCD':
# Block-coordinate descent
self.A += np.outer(r, r.T)
self.B += np.outer((z - e), r.T)
self.L = _updatecol(self.L, self.A, self.B, self.I)
elif self.method == 'SGD':
# Stochastic gradient descent
learn = self.learning_rate * (1 + self.learning_rate *
thislambda1 * self.t)
self.L -= (np.dot(self.L, np.outer(r, r.T))
- np.outer((z - e), r.T)
+ thislambda1 * self.L) / learn
elif self.method == 'MomentumSGD':
# Stochastic gradient descent with momentum
learn = self.learning_rate * (1 + self.learning_rate *
thislambda1 * self.t)
vold = self.momentum * self.vnew
self.vnew = (np.dot(self.L, np.outer(r, r.T))
- np.outer((z - e), r.T)
+ thislambda1 * self.L) / learn
self.L -= (vold + self.vnew)
self.t += 1
def project(self, X):
num = None
if isinstance(X, np.ndarray):
num = X.shape[0]
X = iter(X)
for v in progressbar(X, leave=False, total=num):
r, _ = _solveproj(v, self.L, self.I, self.lambda2)
self.R.append(r.copy())
def finish(self):
R = np.stack(self.R, axis=-1)
Xhat = np.dot(self.L, R)
if len(self.E):
Ehat = np.stack(self.E, axis=-1)
            # keep a marker that sparse errors were collected and free the
            # duplicated data
self.E = [1]
# Do final SVD
U, S, Vh = self.svd(Xhat)
V = Vh.T
# Chop small singular values which
# likely arise from numerical noise
# in the SVD.
S[self.rank:] = 0.
if len(self.E):
return Xhat.T, Ehat, U, S, V
else:
return Xhat.T, 1, U, S, V
def orpca(X, rank, fast=False,
lambda1=None,
lambda2=None,
method=None,
learning_rate=None,
init=None,
training_samples=None,
momentum=None):
"""
This function performs Online Robust PCA
with missing or corrupted data.
Parameters
----------
X : {numpy array, iterator}
[nfeatures x nsamples] matrix of observations
or an iterator that yields samples, each with nfeatures elements.
rank : int
The model dimensionality.
lambda1 : {None, float}
Nuclear norm regularization parameter.
If None, set to 1 / sqrt(nsamples)
lambda2 : {None, float}
Sparse error regularization parameter.
If None, set to 1 / sqrt(nsamples)
method : {None, 'CF', 'BCD', 'SGD', 'MomentumSGD'}
'CF' - Closed-form solver
'BCD' - Block-coordinate descent
'SGD' - Stochastic gradient descent
'MomentumSGD' - Stochastic gradient descent with momentum
If None, set to 'CF'
learning_rate : {None, float}
Learning rate for the stochastic gradient
descent algorithm
If None, set to 1
init : {None, 'qr', 'rand', np.ndarray}
'qr' - QR-based initialization
'rand' - Random initialization
np.ndarray if the shape [nfeatures x rank].
If None, set to 'qr'
training_samples : {None, integer}
Specifies the number of training samples to use in
the 'qr' initialization
If None, set to 10
momentum : {None, float}
Momentum parameter for 'MomentumSGD' method, should be
a float between 0 and 1.
If None, set to 0.5
Returns
-------
Xhat : numpy array
is the [nfeatures x nsamples] low-rank matrix
Ehat : numpy array
is the [nfeatures x nsamples] sparse error matrix
U, S, V : numpy arrays
are the results of an SVD on Xhat
Notes
-----
The ORPCA code is based on a transcription of MATLAB code obtained from
the following research paper:
Jiashi Feng, Huan Xu and Shuicheng Yuan, "Online Robust PCA via
Stochastic Optimization", Advances in Neural Information Processing
Systems 26, (2013), pp. 404-412.
It has been updated to include a new initialization method based
on a QR decomposition of the first n "training" samples of the data.
A stochastic gradient descent (SGD) solver is also implemented,
along with a MomentumSGD solver for improved convergence and robustness
with respect to local minima. More information about the gradient descent
methods and choosing appropriate parameters can be found here:
Sebastian Ruder, "An overview of gradient descent optimization
algorithms", arXiv:1609.04747, (2016), http://arxiv.org/abs/1609.04747.
"""
X = X.T
_orpca = ORPCA(rank, fast=fast, lambda1=lambda1,
lambda2=lambda2, method=method,
learning_rate=learning_rate, init=init,
training_samples=training_samples,
momentum=momentum)
_orpca._setup(X, normalize=True)
_orpca.fit(X)
Xhat, Ehat, U, S, V = _orpca.finish()
return Xhat.T, Ehat, U, S, V
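# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the HyperSpy API): the
# synthetic data, rank and solver choices below are assumptions made purely to
# exercise the two entry points above on an [nfeatures x nsamples] matrix.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    low_rank = np.dot(rng.randn(100, 5), rng.randn(5, 200))   # rank-5 signal
    outliers = (rng.rand(100, 200) < 0.05) * 10.0             # sparse errors
    X = low_rank + outliers
    # Robust PCA via GoDec
    Xhat, Ehat, Ghat, U, S, V = rpca_godec(X, rank=5)
    print("GoDec low-rank recovery error:",
          np.linalg.norm(Xhat - low_rank) / np.linalg.norm(low_rank))
    # Online robust PCA with the closed-form ('CF') solver
    Xhat2, Ehat2, U2, S2, V2 = orpca(X, rank=5, method='CF')
    print("ORPCA low-rank recovery error:",
          np.linalg.norm(Xhat2 - low_rank) / np.linalg.norm(low_rank))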
| sem-geologist/hyperspy | hyperspy/learn/rpca.py | Python | gpl-3.0 | 17,037 |
# Generated by Django 2.1.4 on 2018-12-20 14:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('groups', '0011_customuser_require_2_fact_auth'),
]
operations = [
migrations.AlterField(
model_name='workinggroup',
name='registry',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='rdrf.Registry'),
),
]
| muccg/rdrf | rdrf/registry/groups/migrations/0012_auto_20181220_1457.py | Python | agpl-3.0 | 501 |
from mpi4py import MPI
from cplpy import CPL
import numpy as np
comm = MPI.COMM_WORLD
CPL = CPL()
nsteps = 1
dt = 0.2
density = 0.8
# Parameters of the cpu topology (cartesian grid)
NPx = 2
NPy = 2
NPz = 1
NProcs = NPx*NPy*NPz
# Parameters of the mesh topology (cartesian grid)
ncxyz = np.array([64, 18, 64], order='F', dtype=np.int32)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
# Create communicators and check that number of processors is consistent
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
if (nprocs_realm != NProcs):
print("ERROR: Non-coherent number of processors.")
    MPI.COMM_WORLD.Abort(1)
cart_comm = CFD_COMM.Create_cart([NPx, NPy, NPz])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
cart_rank = cart_comm.Get_rank()
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = np.zeros((3, ncxl, ncyl, nczl), order='F', dtype=np.float64)
for i in range(0, ncxl):
for j in range(0, ncyl):
for k in range(0, nczl):
ii = i + portion[0]
jj = j + portion[2]
kk = k + portion[4]
send_array[0, i, j, k] = ii
send_array[1, i, j, k] = jj
send_array[2, i, j, k] = kk
ierr = CPL.send(send_array, olap_limits)
MPI.COMM_WORLD.Barrier()
CFD_COMM.Free()
cart_comm.Free()
CPL.finalize()
MPI.Finalize()
| Crompulence/cpl-library | examples/sendrecv_globcell/python/cfd_send_cells.py | Python | gpl-3.0 | 1,497 |
"""The Energy websocket API."""
from __future__ import annotations
import asyncio
from collections import defaultdict
from datetime import datetime, timedelta
import functools
from itertools import chain
from types import ModuleType
from typing import Any, Awaitable, Callable, cast
import voluptuous as vol
from homeassistant.components import recorder, websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.integration_platform import (
async_process_integration_platforms,
)
from homeassistant.helpers.singleton import singleton
from homeassistant.util import dt as dt_util
from .const import DOMAIN
from .data import (
DEVICE_CONSUMPTION_SCHEMA,
ENERGY_SOURCE_SCHEMA,
EnergyManager,
EnergyPreferencesUpdate,
async_get_manager,
)
from .types import EnergyPlatform, GetSolarForecastType
from .validate import async_validate
EnergyWebSocketCommandHandler = Callable[
[HomeAssistant, websocket_api.ActiveConnection, "dict[str, Any]", "EnergyManager"],
None,
]
AsyncEnergyWebSocketCommandHandler = Callable[
[HomeAssistant, websocket_api.ActiveConnection, "dict[str, Any]", "EnergyManager"],
Awaitable[None],
]
@callback
def async_setup(hass: HomeAssistant) -> None:
"""Set up the energy websocket API."""
websocket_api.async_register_command(hass, ws_get_prefs)
websocket_api.async_register_command(hass, ws_save_prefs)
websocket_api.async_register_command(hass, ws_info)
websocket_api.async_register_command(hass, ws_validate)
websocket_api.async_register_command(hass, ws_solar_forecast)
websocket_api.async_register_command(hass, ws_get_fossil_energy_consumption)
@singleton("energy_platforms")
async def async_get_energy_platforms(
hass: HomeAssistant,
) -> dict[str, GetSolarForecastType]:
"""Get energy platforms."""
platforms: dict[str, GetSolarForecastType] = {}
async def _process_energy_platform(
hass: HomeAssistant, domain: str, platform: ModuleType
) -> None:
"""Process energy platforms."""
if not hasattr(platform, "async_get_solar_forecast"):
return
platforms[domain] = cast(EnergyPlatform, platform).async_get_solar_forecast
await async_process_integration_platforms(hass, DOMAIN, _process_energy_platform)
return platforms
def _ws_with_manager(
func: Any,
) -> websocket_api.WebSocketCommandHandler:
"""Decorate a function to pass in a manager."""
@websocket_api.async_response
@functools.wraps(func)
async def with_manager(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
manager = await async_get_manager(hass)
result = func(hass, connection, msg, manager)
if asyncio.iscoroutine(result):
await result
return with_manager
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/get_prefs",
}
)
@_ws_with_manager
@callback
def ws_get_prefs(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: dict,
manager: EnergyManager,
) -> None:
"""Handle get prefs command."""
if manager.data is None:
connection.send_error(msg["id"], websocket_api.ERR_NOT_FOUND, "No prefs")
return
connection.send_result(msg["id"], manager.data)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/save_prefs",
vol.Optional("energy_sources"): ENERGY_SOURCE_SCHEMA,
vol.Optional("device_consumption"): [DEVICE_CONSUMPTION_SCHEMA],
}
)
@_ws_with_manager
async def ws_save_prefs(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: dict,
manager: EnergyManager,
) -> None:
"""Handle get prefs command."""
msg_id = msg.pop("id")
msg.pop("type")
await manager.async_update(cast(EnergyPreferencesUpdate, msg))
connection.send_result(msg_id, manager.data)
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/info",
}
)
@websocket_api.async_response
async def ws_info(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: dict,
) -> None:
"""Handle get info command."""
forecast_platforms = await async_get_energy_platforms(hass)
connection.send_result(
msg["id"],
{
"cost_sensors": hass.data[DOMAIN]["cost_sensors"],
"solar_forecast_domains": list(forecast_platforms),
},
)
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/validate",
}
)
@websocket_api.async_response
async def ws_validate(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: dict,
) -> None:
"""Handle validate command."""
connection.send_result(msg["id"], (await async_validate(hass)).as_dict())
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/solar_forecast",
}
)
@_ws_with_manager
async def ws_solar_forecast(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: dict,
manager: EnergyManager,
) -> None:
"""Handle solar forecast command."""
if manager.data is None:
connection.send_result(msg["id"], {})
return
config_entries: dict[str, str | None] = {}
for source in manager.data["energy_sources"]:
if (
source["type"] != "solar"
or source.get("config_entry_solar_forecast") is None
):
continue
# typing is not catching the above guard for config_entry_solar_forecast being none
for config_entry in source["config_entry_solar_forecast"]: # type: ignore[union-attr]
config_entries[config_entry] = None
if not config_entries:
connection.send_result(msg["id"], {})
return
forecasts = {}
forecast_platforms = await async_get_energy_platforms(hass)
for config_entry_id in config_entries:
config_entry = hass.config_entries.async_get_entry(config_entry_id)
# Filter out non-existing config entries or unsupported domains
if config_entry is None or config_entry.domain not in forecast_platforms:
continue
forecast = await forecast_platforms[config_entry.domain](hass, config_entry_id)
if forecast is not None:
forecasts[config_entry_id] = forecast
connection.send_result(msg["id"], forecasts)
@websocket_api.websocket_command(
{
vol.Required("type"): "energy/fossil_energy_consumption",
vol.Required("start_time"): str,
vol.Required("end_time"): str,
vol.Required("energy_statistic_ids"): [str],
vol.Required("co2_statistic_id"): str,
vol.Required("period"): vol.Any("5minute", "hour", "day", "month"),
}
)
@websocket_api.async_response
async def ws_get_fossil_energy_consumption(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Calculate amount of fossil based energy."""
start_time_str = msg["start_time"]
end_time_str = msg["end_time"]
if start_time := dt_util.parse_datetime(start_time_str):
start_time = dt_util.as_utc(start_time)
else:
connection.send_error(msg["id"], "invalid_start_time", "Invalid start_time")
return
if end_time := dt_util.parse_datetime(end_time_str):
end_time = dt_util.as_utc(end_time)
else:
connection.send_error(msg["id"], "invalid_end_time", "Invalid end_time")
return
statistic_ids = list(msg["energy_statistic_ids"])
statistic_ids.append(msg["co2_statistic_id"])
# Fetch energy + CO2 statistics
statistics = await hass.async_add_executor_job(
recorder.statistics.statistics_during_period,
hass,
start_time,
end_time,
statistic_ids,
"hour",
True,
)
def _combine_sum_statistics(
stats: dict[str, list[dict[str, Any]]], statistic_ids: list[str]
) -> dict[datetime, float]:
"""Combine multiple statistics, returns a dict indexed by start time."""
result: defaultdict[datetime, float] = defaultdict(float)
for statistics_id, stat in stats.items():
if statistics_id not in statistic_ids:
continue
for period in stat:
if period["sum"] is None:
continue
result[period["start"]] += period["sum"]
return {key: result[key] for key in sorted(result)}
def _calculate_deltas(sums: dict[datetime, float]) -> dict[datetime, float]:
prev: float | None = None
result: dict[datetime, float] = {}
for period, sum_ in sums.items():
if prev is not None:
result[period] = sum_ - prev
prev = sum_
return result
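    # For example (illustrative values): hourly sums {01:00: 5.0, 02:00: 7.5,
    # 03:00: 9.0} reduce to deltas {02:00: 2.5, 03:00: 1.5}; the first period
    # has no predecessor and is dropped.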
def _reduce_deltas(
stat_list: list[dict[str, Any]],
same_period: Callable[[datetime, datetime], bool],
period_start_end: Callable[[datetime], tuple[datetime, datetime]],
period: timedelta,
) -> list[dict[str, Any]]:
"""Reduce hourly deltas to daily or monthly deltas."""
result: list[dict[str, Any]] = []
deltas: list[float] = []
if not stat_list:
return result
prev_stat: dict[str, Any] = stat_list[0]
# Loop over the hourly deltas + a fake entry to end the period
for statistic in chain(
stat_list, ({"start": stat_list[-1]["start"] + period},)
):
if not same_period(prev_stat["start"], statistic["start"]):
start, _ = period_start_end(prev_stat["start"])
# The previous statistic was the last entry of the period
result.append(
{
"start": start.isoformat(),
"delta": sum(deltas),
}
)
deltas = []
if statistic.get("delta") is not None:
deltas.append(statistic["delta"])
prev_stat = statistic
return result
merged_energy_statistics = _combine_sum_statistics(
statistics, msg["energy_statistic_ids"]
)
energy_deltas = _calculate_deltas(merged_energy_statistics)
indexed_co2_statistics = {
period["start"]: period["mean"]
for period in statistics.get(msg["co2_statistic_id"], {})
}
# Calculate amount of fossil based energy, assume 100% fossil if missing
fossil_energy = [
{"start": start, "delta": delta * indexed_co2_statistics.get(start, 100) / 100}
for start, delta in energy_deltas.items()
]
if msg["period"] == "hour":
reduced_fossil_energy = [
{"start": period["start"].isoformat(), "delta": period["delta"]}
for period in fossil_energy
]
elif msg["period"] == "day":
reduced_fossil_energy = _reduce_deltas(
fossil_energy,
recorder.statistics.same_day,
recorder.statistics.day_start_end,
timedelta(days=1),
)
else:
reduced_fossil_energy = _reduce_deltas(
fossil_energy,
recorder.statistics.same_month,
recorder.statistics.month_start_end,
timedelta(days=1),
)
result = {period["start"]: period["delta"] for period in reduced_fossil_energy}
connection.send_result(msg["id"], result)
| mezz64/home-assistant | homeassistant/components/energy/websocket_api.py | Python | apache-2.0 | 11,488 |
import util
import functools
import operator
class test_gather:
def init(self):
for ary, shape in util.gen_random_arrays("R", 3, max_dim=50, dtype="np.float64"):
nelem = functools.reduce(operator.mul, shape)
if nelem == 0:
continue
cmd = "R = bh.random.RandomState(42); a = %s; " % ary
cmd += "ind = M.arange(%d, dtype=np.int64).reshape(%s); " % (nelem, shape)
yield cmd
yield cmd + "ind = ind[::2]; "
if shape[0] > 2:
yield cmd + "ind = ind[1:]; "
if len(shape) > 1 and shape[1] > 5:
yield cmd + "ind = ind[3:]; "
@util.add_bh107_cmd
def test_take(self, cmd):
return cmd + "res = M.take(a, ind)"
def test_take_ary_mth(self, cmd):
return cmd + "res = a.take(ind)"
@util.add_bh107_cmd
def test_indexing(self, cmd):
return cmd + "res = a.flatten()[ind.flatten()]"
class test_scatter:
def init(self):
for ary, shape in util.gen_random_arrays("R", 3, max_dim=50, dtype="np.float64"):
nelem = functools.reduce(operator.mul, shape)
if nelem == 0:
continue
cmd = "R = bh.random.RandomState(42); res = %s; " % ary
cmd += "ind = M.arange(%d, dtype=np.int64).reshape(%s); " % (nelem, shape)
VAL = "val = R.random(shape=ind.shape, bohrium=BH).astype(np.float64); "
yield cmd + VAL
yield cmd + "ind = ind[::2]; " + VAL
if shape[0] > 2:
yield cmd + "ind = ind[1:];" + VAL
if len(shape) > 1 and shape[1] > 5:
yield cmd + "ind = ind[3:];" + VAL
@util.add_bh107_cmd
def test_put(self, cmd):
return cmd + "M.put(res, ind, val)"
@util.add_bh107_cmd
def test_put_scalar(self, cmd):
return cmd + "M.put(res, ind, 42)"
@util.add_bh107_cmd
def test_put_fixed_length_val(self, cmd):
return cmd + "M.put(res, ind, M.arange(10))"
def test_put_ary_mth(self, cmd):
return cmd + "res.put(ind, val)"
@util.add_bh107_cmd
def test_indexing(self, cmd):
return cmd + "res = res.flatten(); res[ind] = val"
def test_cond(self, cmd):
cmd += cmd + "mask = R.random(shape=ind.size, bohrium=BH).astype(np.bool).reshape(ind.shape); "
np_cmd = cmd + "np.put(res, ind[mask], val[mask])"
bh_cmd = cmd + "M.cond_scatter(res, ind, val, mask)"
bh107_cmd = bh_cmd.replace("bh.random.RandomState", "bh107.random.RandomState").replace(", bohrium=BH", "") \
.replace("bh.take", "bh107.take")
return (np_cmd, bh_cmd, bh107_cmd)
class test_nonzero:
def init(self):
for ary, shape in util.gen_random_arrays("R", 3, max_dim=50, dtype="np.float64"):
nelem = functools.reduce(operator.mul, shape)
if nelem == 0:
continue
cmd = "R = bh.random.RandomState(42); a = %s; " % ary
yield cmd
@util.add_bh107_cmd
def test_flatnonzero(self, cmd):
return cmd + "res = M.flatnonzero(a)"
def test_nonzero(self, cmd):
return cmd + "res = M.concatenate(M.nonzero(a))"
class test_fancy_indexing_get:
def init(self):
for ary, shape in util.gen_random_arrays("R", 3, max_dim=50, dtype="np.float64"):
nelem = functools.reduce(operator.mul, shape)
if nelem == 0:
continue
cmd = "R = bh.random.RandomState(42); a = %s; " % ary
ind = "ind = ("
for dim in shape:
ind += "R.random(shape=(10,), bohrium=BH).astype(np.uint64) %% %d, " % dim
ind += "); "
yield cmd + ind
def test_take_using_index_tuple(self, cmd):
cmd += "res = bh.take_using_index_tuple(a, ind)"
bh107_cmd = cmd.replace("bh.random.RandomState",
"bh107.random.RandomState") \
.replace(", bohrium=BH", "") \
.replace("bh.take", "bh107.take")
return (cmd, cmd, bh107_cmd)
@util.add_bh107_cmd
def test_indexing(self, cmd):
return cmd + "res = a[ind]"
class test_fancy_indexing_set:
def init(self):
for ary, shape in util.gen_random_arrays("R", 3, max_dim=50, dtype="np.float64"):
nelem = functools.reduce(operator.mul, shape)
if nelem == 0:
continue
cmd = "R = bh.random.RandomState(42); res = %s; " % ary
ind = "ind = ("
for dim in shape:
ind += "R.random(shape=(10,), bohrium=BH).astype(np.uint64) %% %d, " % dim
ind += "); "
yield cmd + ind
def test_put_using_index_tuple(self, cmd):
cmd += "bh.put_using_index_tuple(res, ind, 42)"
bh107_cmd = cmd.replace("bh.random.RandomState",
"bh107.random.RandomState") \
.replace(", bohrium=BH", "") \
.replace("bh.put", "bh107.put")
return (cmd, cmd, bh107_cmd)
@util.add_bh107_cmd
def test_indexing(self, cmd):
return cmd + "res[ind] = 42"
| madsbk/bohrium | test/python/tests/test_reorganization.py | Python | apache-2.0 | 5,152 |
import json
import os
import shutil
import sys
from formats import *
class CaseInsensCreepyDict(dict):
def __setitem__(self, key, value):
super(CaseInsensCreepyDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensCreepyDict, self).__getitem__(key.lower())
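# Illustrative behaviour: keys are lower-cased on item access, so
#   d = CaseInsensCreepyDict(); d['Foo'] = 1; d['FOO']  -> 1
# Note that keys passed to the constructor or to dict.get() bypass these
# overrides and are not normalised.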
def resolve_paths(path):
try:
path = json.loads(path)
except TypeError:
return None
resolver = sys.modules['formats.%s' % (path['type'],)]
path = resolver.resolve(path)
return path
def setup_tmpdir(tmpdir):
shutil.rmtree(tmpdir, ignore_errors=True)
try:
os.mkdir(tmpdir)
except FileExistsError:
pass
| JackSlateur/raven | utils.py | Python | gpl-2.0 | 629 |
import numpy as np
import matplotlib as mpl
from distutils.version import LooseVersion
import nose
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from . import PlotTestCase
from .. import rcmod
class RCParamTester(object):
def flatten_list(self, orig_list):
iter_list = map(np.atleast_1d, orig_list)
flat_list = [item for sublist in iter_list for item in sublist]
return flat_list
def assert_rc_params(self, params):
for k, v in params.items():
if k == "svg.embed_char_paths":
# This param causes test issues and is deprecated anyway
continue
elif isinstance(v, np.ndarray):
npt.assert_array_equal(mpl.rcParams[k], v)
else:
nt.assert_equal((k, mpl.rcParams[k]), (k, v))
class TestAxesStyle(RCParamTester):
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
def test_default_return(self):
current = rcmod.axes_style()
self.assert_rc_params(current)
def test_key_usage(self):
_style_keys = set(rcmod._style_keys)
for style in self.styles:
nt.assert_true(not set(rcmod.axes_style(style)) ^ _style_keys)
def test_bad_style(self):
with nt.assert_raises(ValueError):
rcmod.axes_style("i_am_not_a_style")
def test_rc_override(self):
rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"}
out = rcmod.axes_style("darkgrid", rc)
nt.assert_equal(out["axes.facecolor"], "blue")
nt.assert_not_in("foo.notaparam", out)
def test_set_style(self):
for style in self.styles:
style_dict = rcmod.axes_style(style)
rcmod.set_style(style)
self.assert_rc_params(style_dict)
def test_style_context_manager(self):
rcmod.set_style("darkgrid")
orig_params = rcmod.axes_style()
with rcmod.axes_style("whitegrid"):
context_params = rcmod.axes_style("whitegrid")
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
def test_style_context_independence(self):
nt.assert_true(set(rcmod._style_keys) ^ set(rcmod._context_keys))
def test_set_rc(self):
rcmod.set(rc={"lines.linewidth": 4})
nt.assert_equal(mpl.rcParams["lines.linewidth"], 4)
rcmod.set()
def test_reset_defaults(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_defaults()
self.assert_rc_params(mpl.rcParamsDefault)
rcmod.set()
def test_reset_orig(self):
# Changes to the rc parameters make this test hard to manage
# on older versions of matplotlib, so we'll skip it
if LooseVersion(mpl.__version__) < LooseVersion("1.3"):
raise nose.SkipTest
rcmod.reset_orig()
self.assert_rc_params(mpl.rcParamsOrig)
rcmod.set()
class TestPlottingContext(RCParamTester):
contexts = ["paper", "notebook", "talk", "poster"]
def test_default_return(self):
current = rcmod.plotting_context()
self.assert_rc_params(current)
def test_key_usage(self):
_context_keys = set(rcmod._context_keys)
for context in self.contexts:
missing = set(rcmod.plotting_context(context)) ^ _context_keys
nt.assert_true(not missing)
def test_bad_context(self):
with nt.assert_raises(ValueError):
rcmod.plotting_context("i_am_not_a_context")
def test_font_scale(self):
notebook_ref = rcmod.plotting_context("notebook")
notebook_big = rcmod.plotting_context("notebook", 2)
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
for k in font_keys:
nt.assert_equal(notebook_ref[k] * 2, notebook_big[k])
def test_rc_override(self):
key, val = "grid.linewidth", 5
rc = {key: val, "foo": "bar"}
out = rcmod.plotting_context("talk", rc=rc)
nt.assert_equal(out[key], val)
nt.assert_not_in("foo", out)
def test_set_context(self):
for context in self.contexts:
context_dict = rcmod.plotting_context(context)
rcmod.set_context(context)
self.assert_rc_params(context_dict)
def test_context_context_manager(self):
rcmod.set_context("notebook")
orig_params = rcmod.plotting_context()
with rcmod.plotting_context("paper"):
context_params = rcmod.plotting_context("paper")
self.assert_rc_params(context_params)
self.assert_rc_params(orig_params)
class TestFonts(PlotTestCase):
def test_set_font(self):
rcmod.set(font="Verdana")
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
def test_set_serif_font(self):
rcmod.set(font="serif")
_, ax = plt.subplots()
ax.set_xlabel("foo")
nt.assert_in(ax.xaxis.label.get_fontname(),
mpl.rcParams["font.serif"])
rcmod.set()
def test_different_sans_serif(self):
if LooseVersion(mpl.__version__) < LooseVersion("1.4"):
raise nose.SkipTest
rcmod.set()
rcmod.set_style(rc={"font.sans-serif":
["Verdana"]})
_, ax = plt.subplots()
ax.set_xlabel("foo")
try:
nt.assert_equal(ax.xaxis.label.get_fontname(),
"Verdana")
except AssertionError:
if has_verdana():
raise
else:
raise nose.SkipTest("Verdana font is not present")
finally:
rcmod.set()
def has_verdana():
"""Helper to verify if Verdana font is present"""
    # This import is relatively slow, so to avoid paying its cost for the
    # other tests in this module that do not need it,
    # import font_manager here
import matplotlib.font_manager as mplfm
try:
verdana_font = mplfm.findfont('Verdana', fallback_to_default=False)
except:
# if https://github.com/matplotlib/matplotlib/pull/3435
# gets accepted
return False
# otherwise check if not matching the logic for a 'default' one
try:
unlikely_font = mplfm.findfont("very_unlikely_to_exist1234",
fallback_to_default=False)
except:
# if matched verdana but not unlikely, Verdana must exist
return True
# otherwise -- if they match, must be the same default
return verdana_font != unlikely_font
| lypzln/seaborn | seaborn/tests/test_rcmod.py | Python | bsd-3-clause | 7,181 |
import requests
import kacpaw.content_abcs as abcs
from kacpaw.utils import kaurl, update_dict_path
class User(abcs.Editable):
"""
A user on KA.
Note on deletion: Users are technically deletable, but I don't want to
implement account deletion. There are no use cases I can think of and
it's not something that you would want to do on accident. If you want to
implement account deletion, subclass this like this::
class DeletableUser(Deletable, User):
...
and define api_delete.
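    A hypothetical lookup (the username below is made up)::
        user = User.from_username("some_username")
        print(user.id)  # the user's kaid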
"""
get_user = kaurl("api/internal/user/profile")
api_get = property((get_user + "?kaid={.id}").format)
# todo: I'm still trying to get user profile editing to work. It seems that giving the entire metadata object causes errors.
api_edit = kaurl("api/internal/user/profile")
api_edit_method = "POST"
meta_path_map = {
"bio": ["bio"],
"name": ["nickname"],
"username": ["username"]
}
def __init__(self, ka_id):
self.ka_id = ka_id
@classmethod
def _from_identifier(cls, identifier_kind, identifier):
"""Gets a user by an arbitrary identifier"""
resp = requests.get(cls.get_user, params={
identifier_kind: identifier
})
resp.raise_for_status()
return cls(resp.json()["kaid"])
@classmethod
def from_username(cls, username):
"""Gets a user by their username"""
return cls._from_identifier("username", username)
@classmethod
def from_email(cls, email):
"""Gets a user by thier email"""
return cls._from_identifier("email", email)
@property
def id(self):
"""A user's id is their ka_id"""
return self.ka_id
# todo: things like Comments that have properties like text_content which send
# off requests are for some reason being called when I use autocompletion in
# my python repl. This is bad because the properties take a long time to be
# called and autocompletion results should be fast. The properties can also
# raise errors, which is extra bad. This is due to hasattr which checks to
# see if getattr raises an AttributeError, so I'm not sure if I can do much
# about it...
class Comment(abcs.Editable, abcs.Replyable, abcs.Deletable):
"""Any kind of comment on KA"""
# these properties work no matter where the comment is
api_delete = property(kaurl("api/internal/feedback/{.id}").format)
api_reply = property(kaurl("api/internal/discussions/{.id}/replies").format)
meta_path_map = {
"text_content": ["content"]
}
def __init__(self, comment_id, context):
self.comment_id = comment_id
def get_reply_data(self):
resp = requests.get(self.api_reply)
resp.raise_for_status()
yield from resp.json()
def get_author(self):
"""Returns the ``User`` who wrote the comment."""
return User(self.get_metadata()["authorKaid"])
def get_parent(self):
raise NotImplementedError
def edit(self, session, message):
session.put(self.api_edit,
json={
"text": message
# topic?
}
).raise_for_status()
@property
def id(self):
"""Comments use their comment_id for identication"""
return self.comment_id
class _CommentDoesntExistError(requests.HTTPError):
"""
    This is a hopefully temporary exception that is raised when you try to get
    data from a comment that doesn't exist.
We subclass from HTTPError, because hopefully in the future, this will be
an HTTPError and not a _CommentDoesntExistError
"""
def __init__(self, comment):
super().__init__((
"{.id} does not identify a valid comment. For future "
"compatibility reasons, if you want to handle this error, handle "
"it as a requests.HTTPError instead of a "
"kacpaw._CommentDoesntExistError"
).format(comment))
class ProgramComment(Comment):
"""A comment in the context of a KA program"""
api_get = property(kaurl("api/internal/discussions/scratchpad/{0.program_id}/comments?qa_expand_key={0.id}").format)
api_edit = property(kaurl("api/internal/discussions/scratchpad/{0.program_id}/comments/{0.id}").format)
# ProgramCommentReply is not - and cannot be - implemented yet, so we
# can't just set reply_type to ProgramCommentReply
reply_type = property(lambda _: ProgramCommentReply)
def __init__(self, ka_id, context):
"""Program comments take a ka_id, which is a long string KA uses to identify the comment. \
Usually, this is a string that starts with "kaencrypted_". I believe that some other types of \
comment ids work, although thanks to the undocumented nature of the internal api, \
I would stick to the "kaencrypted_" ones.
You also need a context object. These are normally Program objects, but they can also be other \
ProgramComment objects, or anything with a program_id"""
super().__init__(ka_id, context)
self.program_id = context.program_id
def _comment_exists(self):
"""
This is yet another horrible workaround to keep this from falling apart.
Because of the other workarounds to get around the fact that you
cannot get metadata from a comment, we've run into problems where it
is not always obvious if a comment even exists. For this reason, we
need to have a method to test it for us.
"""
try:
# Getting the reply data from a comment should raise an error if
# it doesn't exist. We use Comment's get_reply_data because
# ProgramCommentReply's get_reply_data is another one of those
# horrible workarounds
list(Comment(self.id, self.get_program()).get_reply_data())
except requests.HTTPError:
return False
return True
def get_program(self):
"""Returns the ``Program`` that the comment was posted on."""
return Program(self.program_id)
get_parent = get_program
def get_metadata(self):
# when using qa_expand_key, the first comment will be the one we want,
# so pop out the first comment
data = super().get_metadata()["feedback"].pop(0)
        # order _might_ matter here on the off chance that a comment is deleted
        # after we check that it exists and before we get its data
if self._comment_exists():
return data
raise _CommentDoesntExistError(self)
@property
def url(self):
return "{}?qa_expand_key={}".format(self.get_program().url, self.id)
class ProgramCommentReply(ProgramComment):
"""A reply to a program comment"""
def reply(self, session, message):
"""Adds a ``ProgramCommentReply`` to the thread.
The reply will start with the character '@', followed by the author of this comment \
to make it more clear what we are replying to.
If you don't want this behavior, use ``reply.get_parent().reply`` instead."""
return self.get_parent().reply(session, "@{metadata[authorNickname]}: {message}".format(
metadata=self.get_metadata(), message=message
))
def get_parent(self):
"""Returns the ``ProgramComment`` that started the thread."""
return ProgramComment(super().get_metadata()["key"], self)
def get_metadata(self):
"""Returns a dictionary with information about this ``ProgramCommentReply``."""
# there's no way that I've found to get comment reply metadata directly,
        # so we iterate through the comment thread until we find this comment
for comment_data in self.get_parent().get_reply_data():
if comment_data["key"] == self.id:
return comment_data
raise _CommentDoesntExistError(self)
# I'm keeping this todo until I can fully address it, although I did
# add an error
# todo: raise some error instead of returning None. What error? IDK.
# I'm almost tempted to pretend it's an HTTPError, but I'll need to do
# some research into why we would get here (We can get here btw.
# That's how I found this). Would self.get_parent().get_reply_data()
# raise an HTTPError if self.comment_key was nonsense? If that's the
# case, we will only (probably) be here if comment_key was a
# ProgramComment key instead of a ProgramCommentReply key, so we would
# probably want to have an error that's more specific (like TypeError
# maybe?). Man, there are so many edge cases with this stuff that I
# really should look into now that I think about it... We are also
# probably going to need to keep a lot of this comment to explain why
# we raise the error we do.
def get_reply_data(self):
"""Yields all ``ProgramCommentReply``s that were posted after this one."""
# Similar principle to get_metadata - we can't get what we want directly.
gen = self.get_parent().get_reply_data()
while next(gen)["key"] != self.id:
pass
yield from gen
# jinja2 is probably a good choice for Program formatters. I might even want to add one to this class for convenience.
class Program(abcs.Editable, abcs.Replyable, abcs.Questionable, abcs.Spinoffable, abcs.Deletable):
"""
Any kind of "program" on KA, such as one created using
https://www.khanacademy.org/computer-programming/new/pjs
"""
api_get = api_edit = api_delete = property(kaurl("api/internal/scratchpads/{.id}").format)
api_reply = property(kaurl("api/internal/discussions/scratchpad/{.id}/comments").format)
api_create_program = kaurl("api/internal/scratchpads")
reply_type = ProgramComment
meta_path_map = {
"image_url": ["revision", "imageUrl"],
"url": ["url"],
"code": ["revision", "code"],
"width": ["width"],
"height": ["height"],
"title": ["title"],
"kind": ["userAuthoredContentType"]
}
def __init__(self, program_id):
"""Programs are constructed using a program id.
A program id is the last part of a program's url, so \
https://www.khanacademy.org/computer-programming/ka-api-bot-test/4617827881975808 \
has the program id 4617827881975808"""
self.program_id = program_id
@classmethod
def create(cls):
        raise NotImplementedError("program creation is not implemented yet")
def get_reply_data(self, **params):
resp = requests.get(self.api_reply,
params=dict({
"sort": 1,
"subject": "all",
"lang": "en",
"limit": 10
}, **params)
)
resp.raise_for_status()
data = resp.json()
yield from data["feedback"]
if not data["isComplete"]: # There are more comments we haven't gotten to yet
yield from self.get_reply_data(**dict(params, cursor=data["cursor"]))
def get_metadata(self):
metadata = super().get_metadata()
# image_url isn't in the right place, so put it there
return update_dict_path(metadata, self.meta_path_map["image_url"], metadata.get("imageUrl"))
@property
def id(self):
"""Programs use their program_id for identication"""
return self.program_id | Potato42/kacpaw | kacpaw/content.py | Python | mit | 11,428 |
# coding=utf-8
"""Unittest for Earthquake Report."""
import os
import io
import shutil
import unittest
from jinja2.environment import Template
from safe.common.utilities import safe_dir
from safe.definitions.constants import ANALYSIS_SUCCESS
from safe.definitions.reports.components import (
standard_impact_report_metadata_html,
general_report_component,
population_chart_svg_component, infographic_report)
from safe.impact_function.impact_function import ImpactFunction
from safe.report.impact_report import ImpactReport
from safe.report.report_metadata import ReportMetadata
from safe.test.utilities import (
get_qgis_app,
load_test_raster_layer)
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
# TODO: find out why this test makes test_minimum_needs_outputs failing
# if this test runs in travis before test_minimum_needs_outputs. We change
# the filename at the moment so it will run after test_minimum_needs_outputs.
class TestEarthquakeReport(unittest.TestCase):
"""Test Earthquake Report.
.. versionadded:: 4.0
"""
maxDiff = None
@classmethod
def fixtures_dir(cls, path):
"""Helper to return fixture path."""
directory_name = os.path.dirname(__file__)
return os.path.join(directory_name, 'fixtures', path)
def assert_compare_file_control(self, control_path, actual_path):
"""Helper to compare file."""
current_directory = safe_dir(sub_dir='../resources')
context = {
'current_directory': current_directory
}
with open(control_path) as control_file:
template_string = control_file.read()
template = Template(template_string)
control_string = template.render(context).strip()
with io.open(actual_path, encoding='utf-8') as actual_file:
actual_string = actual_file.read().strip()
self.assertEquals(control_string, actual_string)
def test_earthquake_population_without_aggregation(self):
"""Testing Earthquake in Population without aggregation.
.. versionadded:: 4.0
"""
output_folder = self.fixtures_dir('../output/earthquake_population')
# Classified vector with building-points
shutil.rmtree(output_folder, ignore_errors=True)
hazard_layer = load_test_raster_layer(
'hazard', 'earthquake.tif')
exposure_layer = load_test_raster_layer(
'exposure', 'pop_binary_raster_20_20.asc')
impact_function = ImpactFunction()
impact_function.exposure = exposure_layer
impact_function.hazard = hazard_layer
impact_function.prepare()
return_code, message = impact_function.run()
self.assertEqual(return_code, ANALYSIS_SUCCESS, message)
report_metadata = ReportMetadata(
metadata_dict=standard_impact_report_metadata_html)
impact_report = ImpactReport(
IFACE,
report_metadata,
impact_function=impact_function)
impact_report.output_folder = output_folder
return_code, message = impact_report.process_components()
self.assertEqual(
return_code, ImpactReport.REPORT_GENERATION_SUCCESS, message)
"""Checking generated context"""
empty_component_output_message = 'Empty component output'
# Check Analysis Summary
analysis_summary = impact_report.metadata.component_by_key(
general_report_component['key'])
""":type: safe.report.report_metadata.Jinja2ComponentsMetadata"""
expected_context = {
'table_header': (
u'Estimated Number of people affected per MMI intensity'),
'header': u'General Report',
'summary': [
{
'header_label': u'Hazard Zone',
'rows': [
{'value': 0, 'name': u'X', 'key': 'X'},
{'value': 0, 'name': u'IX', 'key': 'IX'},
{'value': '200', 'name': u'VIII', 'key': 'VIII'},
{'value': 0, 'name': u'VII', 'key': 'VII'},
{'value': 0, 'name': u'VI', 'key': 'VI'},
{'value': 0, 'name': u'V', 'key': 'V'},
{'value': 0, 'name': u'IV', 'key': 'IV'},
{'value': 0, 'name': u'III', 'key': 'III'},
{'value': 0, 'name': u'II', 'key': 'II'},
{'value': 0, 'name': u'I', 'key': 'I'},
{
'as_header': True,
'key': 'total_field',
'name': u'Total',
'value': '200'
}
],
'value_label': u'Count'
},
{
'header_label': u'Population',
'rows': [
{
'value': '200',
'name': u'Affected',
'key': 'total_affected_field',
}, {
'key': 'total_not_affected_field',
'name': u'Not Affected',
'value': '0'
}, {
'key': 'total_not_exposed_field',
'name': u'Not Exposed',
'value': '0'},
{
'value': '200',
'name': u'Displaced',
'key': 'displaced_field'
}, {
'value': '0 - 100',
'name': u'Fatalities',
'key': 'fatalities_field'
}],
'value_label': u'Count'
}
],
'notes': [
'Exposed People: People who are present in hazard zones and '
'are thereby subject to potential losses. In InaSAFE, people '
'who are exposed are those people who are within the extent '
'of the hazard.',
'Affected People: People who are affected by a hazardous '
'event. People can be affected directly or indirectly. '
'Affected people may experience short-term or long-term '
'consequences to their lives, livelihoods or health and in '
'the economic, physical, social, cultural and environmental '
'assets. In InaSAFE, people who are killed during the event '
'are also considered affected.',
'Displaced People: Displaced people are people who, for '
'different reasons and circumstances because of risk or '
'disaster, have to leave their place of residence. '
'In InaSAFE, demographic and minimum needs reports are based '
'on displaced / evacuated people.'
]
}
actual_context = analysis_summary.context
self.assertDictEqual(expected_context, actual_context)
self.assertTrue(
analysis_summary.output, empty_component_output_message)
report_metadata = ReportMetadata(
metadata_dict=infographic_report)
infographic_impact_report = ImpactReport(
IFACE,
report_metadata,
impact_function=impact_function)
infographic_impact_report.output_folder = output_folder
return_code, message = infographic_impact_report.process_components()
self.assertEqual(
return_code, ImpactReport.REPORT_GENERATION_SUCCESS, message)
# check population pie chart if we have 100% donut slice
population_chart_svg = (
infographic_impact_report.metadata.component_by_key(
population_chart_svg_component['key'])
)
expected_slices = [
{
'value': 200,
'show_label': True,
'center': (224.0, 128.0),
'stroke_opacity': 1,
'path': 'M128.000000,0.000000a128.000000,128.000000 0 0 1 '
'0.000000,256.000000l-0.000000,-64.000000a64.000000,'
'64.000000 0 0 0 0.000000,-128.000000Z',
'percentage': 100,
'label': u'VIII',
'stroke': u'#ff7000',
'label_position': (256, 0),
'fill': u'#ff7000'
}, {
'value': 100,
'show_label': False,
'center': (32.0, 128.0),
'stroke_opacity': 1,
'path': 'M128.000000,256.000000a128.000000,128.000000 0 0 1 '
'-0.000000,-256.000000l0.000000,64.000000a64.000000,'
'64.000000 0 0 0 0.000000,128.000000Z',
'percentage': 50.0,
'label': '',
'stroke': u'#ff7000',
'label_position': (256, 0),
'fill': u'#ff7000'
}, {
'value': 0,
'show_label': False,
'center': (128.0, 224.0),
'stroke_opacity': 1,
'path': 'M128.000000,256.000000a128.000000,128.000000 0 0 1 '
'0.000000,0.000000l-0.000000,-64.000000a64.000000,'
'64.000000 0 0 0 0.000000,0.000000Z',
'percentage': 0.0,
'label': u'Total Not Affected',
'stroke': '#fff',
'label_position': (256, 0),
'fill': u'#1a9641'
}]
actual_context = population_chart_svg.context['context']
actual_slices = actual_context.slices
self.assertEqual(expected_slices, actual_slices)
self.assertTrue(
population_chart_svg.output, empty_component_output_message)
shutil.rmtree(output_folder, ignore_errors=True)
| Gustry/inasafe | safe/report/test/test_impact_report_earthquake.py | Python | gpl-3.0 | 10,264 |
print('I will now count my chickens:')
print('Hens', 25 + 30 / 6)
print('Roosters', 100 - 25 * 3 % 4)
print('Now I will count the eggs:')
print(3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)
print('Is it true that 3 + 2 < 5 - 7?')
print(3 + 2 < 5 - 7)
print('What is 3 + 2?', 3 + 2)
print('What is 5 - 7?', 5 - 7)
print('Oh, that\'s why it\'s False.')
print('How about some more.')
print('Is it greater?', 5 > -2)
print('Is it greater or equal?', 5 >= -2)
print('Is it less or equal?', 5 <= -2)
| johnwang117/learn-python-the-hard-way | ex3.py | Python | gpl-3.0 | 490 |
# -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from psycopg2 import IntegrityError
from openerp.tests.common import TransactionCase
from openerp.exceptions import ValidationError
class TestMedicalPathology(TransactionCase):
def setUp(self):
super(TestMedicalPathology, self).setUp()
self.pathology_1 = self.env.ref(
'medical_pathology.medical_pathology_medical_pathology_1'
)
def test_check_unique_code(self):
""" Test 2 same codes per code_type raises integrity error """
with self.assertRaises(IntegrityError):
self.pathology_1.code = '[DEMO] B54'
def test_check_recursive_parent(self):
""" Test category recursive parent raises ValidationError """
parent = self.env.ref(
'medical_pathology.medical_pathology_medical_pathology_A00',
)
with self.assertRaises(ValidationError):
parent.parent_id = self.env.ref(
'medical_pathology.medical_pathology_medical_pathology_A00_0',
).id
| laslabs/vertical-medical | medical_pathology/tests/test_medical_pathology.py | Python | agpl-3.0 | 1,121 |
from casepro.utils import parse_csv
from .models import ContainsTest, GroupsTest, Quantifier, WordCountTest
class RuleFormMixin(object):
def derive_initial(self):
initial = super(RuleFormMixin, self).derive_initial()
if self.object:
tests_by_type = {t.TYPE: t for t in self.object.get_tests()}
contains_test = tests_by_type.get("contains")
groups_test = tests_by_type.get("groups")
field_test = tests_by_type.get("field")
words_test = tests_by_type.get("words")
if contains_test:
initial["keywords"] = ", ".join(contains_test.keywords)
if groups_test:
initial["groups"] = groups_test.groups
if field_test:
initial["field_test"] = field_test
if words_test:
initial["ignore_single_words"] = True
return initial
def construct_tests(self):
"""
Constructs tests from form field values
"""
data = self.form.cleaned_data
keywords = parse_csv(data["keywords"])
groups = data["groups"]
field_test = data["field_test"]
ignore_single_words = data["ignore_single_words"]
tests = []
if keywords:
tests.append(ContainsTest(keywords, Quantifier.ANY))
if groups:
tests.append(GroupsTest(groups, Quantifier.ANY))
if field_test:
tests.append(field_test)
if ignore_single_words:
tests.append(WordCountTest(2))
return tests
| praekelt/casepro | casepro/rules/mixins.py | Python | bsd-3-clause | 1,578 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageNonMaximumSuppression(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageNonMaximumSuppression(), 'Processing.',
('vtkImageData', 'vtkImageData'), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkImageNonMaximumSuppression.py | Python | bsd-3-clause | 530 |
"""
Futures tools for threadly
"""
import threading
import time
class ListenableFuture(object):
"""
    This class is used to make a Future that can have listeners and callbacks
added to it. Once setter(object) is called all listeners/callbacks are
also called. Callbacks will be given the set object, and .get() will
return said object.
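    A small usage sketch (the callback below is illustrative)::
        future = ListenableFuture()
        future.add_callable(lambda result: print(result))
        future.setter("done")   # runs the callback with "done"
        future.get()            # returns "done"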
"""
def __init__(self):
self.lock = threading.Condition()
self.settable = None
self.listeners = list()
self.callables = list()
def add_listener(self, listener, args=None, kwargs=None):
"""
        Add a listener function to this ListenableFuture. Once setter() is called
        on this future all listeners will be run. Arguments for the listener
can be given if needed.
`listener` a callable that will be called when the future is completed
`args` tuple arguments that will be passed to the listener when called.
`kwargs` dict keyword arguments to be passed to the passed listener
when called.
"""
args = args or ()
kwargs = kwargs or {}
if self.settable is None:
self.listeners.append((listener, args, kwargs))
else:
listener(*args, **kwargs)
def add_callable(self, cable, args=None, kwargs=None):
"""
        Add a callable function to this ListenableFuture. Once setter() is called
        on this future all callables will be run. This works the same as the
listener except the set object is passed as the first argument when
the callable is called. Arguments for the listener can be given if
needed.
        `cable` a callable that will be called when the future is completed;
        it must have at least 1 argument.
`args` tuple arguments that will be passed to the listener when called.
`kwargs` dict keyword arguments to be passed to the passed listener
when called.
"""
args = args or ()
kwargs = kwargs or {}
if self.settable is None:
self.callables.append((cable, args, kwargs))
else:
cable(self.settable, *args, **kwargs)
def get(self, timeout=2 ** 32):
"""
This is a blocking call that will return the set object once it is set.
`timeout` The max amount of time to wait for get (in seconds).
        If this is reached, None is returned.
`returns` the set object. This can technically be anything so know
        what you're listening for.
"""
if self.settable is not None:
return self.settable
start = time.time()
try:
self.lock.acquire()
while self.settable is None and time.time() - start < timeout:
self.lock.wait(timeout - (time.time() - start))
return self.settable
finally:
self.lock.release()
def setter(self, obj):
"""
This is used to complete this future. Whatever thread sets this will
be used to call all listeners and callables for this future.
`obj` The object you want to set on this future
        (usually use just True if you don't care)
"""
if self.settable is None:
self.settable = obj
self.lock.acquire()
self.lock.notify_all()
self.lock.release()
while len(self.listeners) > 0:
i = self.listeners.pop(0)
try:
i[0](*i[1], **i[2])
except Exception as exp:
print("Exception calling listener", i[0], exp)
while len(self.callables) > 0:
i = self.callables.pop(0)
try:
i[0](self.settable, *i[1], **i[2])
except Exception as exp:
print("Exception calling listener", i[0], exp)
else:
raise Exception("Already Set!")
def future_job(future, job):
"""
This is a simple helper function used to wrap a task on the Scheduler
in a future. Once the job runs the future will complete.
`future` The future that will be completed once the job finishes.
`job` The job to run before completing the future.
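    Illustrative use, given that a job is a (callable, args, kwargs) tuple::
        f = ListenableFuture()
        future_job(f, (some_function, (arg,), {}))
        f.get()  # True once the job ran without raising, False otherwise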
"""
try:
job[0](*job[1], **job[2])
future.setter(True)
except Exception as exp:
print("Error running futureJob:", exp)
future.setter(False)
| lwahlmeier/python-threadly | threadly/Futures.py | Python | unlicense | 4,445 |
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cyborg base exception handling.
SHOULD include dedicated exception logging.
"""
from oslo_log import log
import six
from six.moves import http_client
from cyborg.common.i18n import _
from cyborg.conf import CONF
LOG = log.getLogger(__name__)
class CyborgException(Exception):
"""Base Cyborg Exception
To correctly use this class, inherit from it and define
a '_msg_fmt' property. That message will get printf'd
with the keyword arguments provided to the constructor.
If you need to access the message from an exception you should use
six.text_type(exc)
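    An illustrative (hypothetical) subclass, mirroring the concrete exceptions
    defined later in this module::
        class WidgetNotFound(CyborgException):
            _msg_fmt = _("Widget %(uuid)s could not be found.")
            code = http_client.NOT_FOUND
    which would then be raised as ``raise WidgetNotFound(uuid=some_uuid)``.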
"""
_msg_fmt = _("An unknown exception occurred.")
code = http_client.INTERNAL_SERVER_ERROR
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self._msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in self._msg_fmt
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise
else:
# at least get the core self._msg_fmt out if something
# happened
message = self._msg_fmt
super(CyborgException, self).__init__(message)
def __str__(self):
"""Encode to utf-8 then wsme api can consume it as well."""
if not six.PY3:
return unicode(self.args[0]).encode('utf-8')
return self.args[0]
def __unicode__(self):
"""Return a unicode representation of the exception message."""
return unicode(self.args[0])
class AttachHandleAlreadyExists(CyborgException):
_msg_fmt = _("AttachHandle with uuid %(uuid)s already exists.")
class ControlpathIDAlreadyExists(CyborgException):
_msg_fmt = _("ControlpathID with uuid %(uuid)s already exists.")
class ConfigInvalid(CyborgException):
_msg_fmt = _("Invalid configuration file. %(error_msg)s")
class DeviceAlreadyExists(CyborgException):
_msg_fmt = _("Device with uuid %(uuid)s already exists.")
class DeviceProfileAlreadyExists(CyborgException):
_msg_fmt = _("DeviceProfile with uuid %(uuid)s already exists.")
class DeployableAlreadyExists(CyborgException):
_msg_fmt = _("Deployable with uuid %(uuid)s already exists.")
class ExtArqAlreadyExists(CyborgException):
_msg_fmt = _("ExtArq with uuid %(uuid)s already exists.")
class Invalid(CyborgException):
_msg_fmt = _("Invalid parameters.")
code = http_client.BAD_REQUEST
class InvalidIdentity(Invalid):
_msg_fmt = _("Expected a uuid/id but received %(identity)s.")
class InvalidUUID(Invalid):
_msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidJsonType(Invalid):
_msg_fmt = _("%(value)s is not JSON serializable.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
_msg_fmt = _("%(err)s")
class PatchError(Invalid):
_msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
class NotAuthorized(CyborgException):
_msg_fmt = _("Not authorized.")
code = http_client.FORBIDDEN
class HTTPForbidden(NotAuthorized):
_msg_fmt = _("Access was denied to the following resource: %(resource)s")
class NotFound(CyborgException):
_msg_fmt = _("Resource could not be found.")
code = http_client.NOT_FOUND
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class AttachHandleNotFound(NotFound):
_msg_fmt = _("AttachHandle %(uuid)s could not be found.")
class ControlpathIDNotFound(NotFound):
_msg_fmt = _("ControlpathID %(uuid)s could not be found.")
class ConfGroupForServiceTypeNotFound(ServiceNotFound):
msg_fmt = _("No conf group name could be found for service type "
"%(stype)s.")
class DeviceNotFound(NotFound):
_msg_fmt = _("Device %(uuid)s could not be found.")
class DeviceProfileNotFound(NotFound):
_msg_fmt = _("DeviceProfile %(uuid)s could not be found.")
class DeployableNotFound(NotFound):
_msg_fmt = _("Deployable %(uuid)s could not be found.")
class ExtArqNotFound(NotFound):
_msg_fmt = _("ExtArq %(uuid)s could not be found.")
class InvalidDeployType(CyborgException):
_msg_fmt = _("Deployable have an invalid type")
class Conflict(CyborgException):
_msg_fmt = _('Conflict.')
code = http_client.CONFLICT
class DuplicateDeviceName(Conflict):
_msg_fmt = _("A device with name %(name)s already exists.")
class DuplicateDeviceProfileName(Conflict):
_msg_fmt = _("A device_profile with name %(name)s already exists.")
class DuplicateDeployableName(Conflict):
_msg_fmt = _("A deployable with name %(name)s already exists.")
class PlacementEndpointNotFound(NotFound):
message = _("Placement API endpoint not found")
class PlacementResourceProviderNotFound(NotFound):
message = _("Placement resource provider not found %(resource_provider)s.")
class PlacementInventoryNotFound(NotFound):
message = _("Placement inventory not found for resource provider "
"%(resource_provider)s, resource class %(resource_class)s.")
class PlacementInventoryUpdateConflict(Conflict):
message = _("Placement inventory update conflict for resource provider "
"%(resource_provider)s, resource class %(resource_class)s.")
class ObjectActionError(CyborgException):
_msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class AttributeNotFound(NotFound):
_msg_fmt = _("Attribute %(uuid)s could not be found.")
class AttributeInvalid(CyborgException):
_msg_fmt = _("Attribute is invalid")
class AttributeAlreadyExists(CyborgException):
_msg_fmt = _("Attribute with uuid %(uuid)s already exists.")
# An exception with this name is used on both sides of the placement/
# cyborg interaction.
class ResourceProviderInUse(CyborgException):
msg_fmt = _("Resource provider has allocations.")
class ResourceProviderRetrievalFailed(CyborgException):
msg_fmt = _("Failed to get resource provider with UUID %(uuid)s")
class ResourceProviderAggregateRetrievalFailed(CyborgException):
msg_fmt = _("Failed to get aggregates for resource provider with UUID"
" %(uuid)s")
class ResourceProviderTraitRetrievalFailed(CyborgException):
msg_fmt = _("Failed to get traits for resource provider with UUID"
" %(uuid)s")
class ResourceProviderCreationFailed(CyborgException):
msg_fmt = _("Failed to create resource provider %(name)s")
class ResourceProviderDeletionFailed(CyborgException):
msg_fmt = _("Failed to delete resource provider %(uuid)s")
class ResourceProviderUpdateFailed(CyborgException):
msg_fmt = _("Failed to update resource provider via URL %(url)s: "
"%(error)s")
class ResourceProviderNotFound(NotFound):
msg_fmt = _("No such resource provider %(name_or_uuid)s.")
class ResourceProviderSyncFailed(CyborgException):
msg_fmt = _("Failed to synchronize the placement service with resource "
"provider information supplied by the compute host.")
class PlacementAPIConnectFailure(CyborgException):
msg_fmt = _("Unable to communicate with the Placement API.")
class PlacementAPIConflict(CyborgException):
"""Any 409 error from placement APIs should use (a subclass of) this
exception.
"""
msg_fmt = _("A conflict was encountered attempting to invoke the "
"placement API at URL %(url)s: %(error)s")
class ResourceProviderUpdateConflict(PlacementAPIConflict):
"""A 409 caused by generation mismatch from attempting to update an
existing provider record or its associated data (aggregates, traits, etc.).
"""
msg_fmt = _("A conflict was encountered attempting to update resource "
"provider %(uuid)s (generation %(generation)d): %(error)s")
class InvalidResourceClass(Invalid):
msg_fmt = _("Resource class '%(resource_class)s' invalid.")
class InvalidResourceAmount(Invalid):
msg_fmt = _("Resource amounts must be integers. Received '%(amount)s'.")
class InvalidInventory(Invalid):
msg_fmt = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
# An exception with this name is used on both sides of the placement/
# cyborg interaction.
class InventoryInUse(InvalidInventory):
# NOTE(mriedem): This message cannot change without impacting the
# cyborg.services.client.report._RE_INV_IN_USE regex.
msg_fmt = _("Inventory for '%(resource_classes)s' on "
"resource provider '%(resource_provider)s' in use.")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class GlanceConnectionFailed(CyborgException):
msg_fmt = _("Connection to glance host %(server)s failed: "
"%(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageNotAuthorized(CyborgException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class ImageBadRequest(Invalid):
msg_fmt = _("Request of image %(image_id)s got BadRequest response: "
"%(response)s")
class InvalidDriver(Invalid):
_msg_fmt = _("Found an invalid driver: %(name)s")
| openstack/nomad | cyborg/common/exception.py | Python | apache-2.0 | 10,839 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import jinja2
def init_app(app):
app.jinja_loader = jinja2.loaders.FileSystemLoader(app.config['APP_TEMPLATES_FOLDER'])
def app_heartbeat():
pass
| La0/mozilla-relengapi | lib/backend_common/backend_common/templates.py | Python | mpl-2.0 | 382 |
import os.path
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from nose.tools import *
from bulkadmin import forms
from project.models import Place
class BulkUploadTest(TestCase):
def get_test_file_name(self):
dirname = os.path.dirname(__file__)
return os.path.join(dirname, 'import_test.csv')
@istest
def load_data_from_csv(self):
csvname = self.get_test_file_name()
with open(csvname) as csvfile:
data = forms.BulkUploadForm.load_csv(csvfile)
assert isinstance(data, list)
assert_equal(len(data), 25)
assert all([('lat' in place and 'lon' in place) for place in data])
@istest
def use_form_to_load_csv_data(self):
csvname = self.get_test_file_name()
with open(csvname) as csvfile:
form = forms.BulkUploadForm({},
{'data': SimpleUploadedFile('test.csv', csvfile.read())})
assert form.is_valid()
assert isinstance(form.cleaned_data['data'], list)
assert_equal(len(form.cleaned_data['data']), 25)
@istest
def test_BulkUploadFormAdminView(self):
username = 'test_admin'
email = '[email protected]'
password = 'pw'
client = Client()
csvname = self.get_test_file_name()
Place.objects.all().delete()
User.objects.all().delete()
User.objects.create_superuser(username, email, password)
loggedin = client.login(username=username, password=password)
assert (loggedin)
with open(csvname) as csvfile:
bulk_add_url = reverse(admin_urlname(Place._meta, 'bulk_add'))
res = client.post(
bulk_add_url,
{'data': csvfile},
follow=True)
assert_equal(Place.objects.count(), 25)
@istest
def test_BulkUploadFormAdminView_excluding_duplicates(self):
username = 'test_admin'
email = '[email protected]'
password = 'pw'
client = Client()
csvname = self.get_test_file_name()
Place.objects.all().delete()
User.objects.all().delete()
User.objects.create_superuser(username, email, password)
loggedin = client.login(username=username, password=password)
assert (loggedin)
with open(csvname) as csvfile:
bulk_add_url = reverse(admin_urlname(Place._meta, 'bulk_add'))
res = client.post(
bulk_add_url,
{'data': csvfile,
'exclude_duplicates': 'checked'},
follow=True)
assert_equal(Place.objects.count(), 20)
@istest
def test_BulkUploadFormAdminView_excluding_duplicates_with_existing_data(self):
username = 'test_admin'
email = '[email protected]'
password = 'pw'
client = Client()
csvname = self.get_test_file_name()
Place.objects.all().delete()
User.objects.all().delete()
User.objects.create_superuser(username, email, password)
loggedin = client.login(username=username, password=password)
assert (loggedin)
# First, insert some data, duplicates and all
with open(csvname) as csvfile:
bulk_add_url = reverse(admin_urlname(Place._meta, 'bulk_add'))
res = client.post(
bulk_add_url,
{'data': csvfile},
follow=True)
# Then, try inserting data, excluding duplicates
with open(csvname) as csvfile:
bulk_add_url = reverse(admin_urlname(Place._meta, 'bulk_add'))
res = client.post(
bulk_add_url,
{'data': csvfile,
'exclude_duplicates': 'checked'},
follow=True)
assert_equal(Place.objects.count(), 25)
| openplans/streetscore | street_score/project/test/test_bulkupload.py | Python | mit | 4,066 |
"""
This module implements a simple function to sort many lists together.
It sorts the list given by the sorting_index variable and sorts all the other lists the same way.
Usage example::
from PythonLibs.list_sorter import sort_lists_together
a = [2,1,3,4]
b = ["b","a","c","d"]
c = "BACD" #This is also iterable
aa,bb,cc = sort_lists_together(a, b, c, sorting_index=0, reverse=False)
print aa
print bb
print cc
"""
import sys
import collections
def sort_lists_together(*args, **dargs):
'''
Sorts the list together. Sort the list given by sorting_index and sort all the other lists the same way.
:param *args: the lists you want to sort
:param int sorting_index: the index of the reference list in the given lists. Default is the last list
:param bool reverse: to indicate if the sort should be reverse or not. Default is False
:returns: the sorted lists
'''
sorting_index = dargs["sorting_index"] if "sorting_index" in dargs else -1
reverse = dargs["reverse"] if "reverse" in dargs else False
try:
assert type(sorting_index) is int
assert type(reverse) is bool
x = len(args[0])
for l in args:
assert isinstance(l, collections.Iterable)
assert len(l) == x
assert type(l) is not set
except(AssertionError):
raise ValueError("Error in parameters. The lists must be iterable, sorting index must be an integer and reverse a boolean. All lists must have the same length. Impossible to sort sets\n")
sorted_lists = sorted(zip(*args), reverse=reverse, key=lambda x: x[sorting_index])
return ([x[i] for x in sorted_lists] for i in range(len(args)))
| pydupont/PersonalPythonLibs | PersonalLibs/list_sorter.py | Python | gpl-2.0 | 1,695 |
from sys import version_info
if version_info >= (2,6,0):
from os.path import dirname
import imp
fp = None
fp, pathname, description = imp.find_module('pyscanner', [dirname(__file__)])
try:
pyscanner = imp.load_module('pyscanner', fp, pathname, description)
finally:
if fp is not None:
fp.close()
del fp
del pathname
del description
else:
import pyscanner
del version_info
def list():
return pyscanner.list()
class scanner:
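    """Thin wrapper around a pyscanner device: switches the scanner on for the
    lifetime of this object and exposes scan / image / ISO-template helpers."""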
def __init__(self, name):
self.__scanner = pyscanner.get(name)
pyscanner.on(self.__scanner)
self.__caps = pyscanner.get_caps(self.__scanner)
def __del__(self):
pyscanner.off(self.__scanner)
del self.__scanner
def scan(self, timeout):
return pyscanner.scan(self.__scanner, timeout)
def image(self):
if not 'image_format' in self.__caps:
return None
return { 'format': self.__caps['image_format'],
'width': self.__caps['image_width'],
'height': self.__caps['image_height'],
'data': pyscanner.get_image(self.__scanner) }
def iso_template(self):
if not 'iso_template' in self.__caps or not self.__caps['iso_template']:
return None
return pyscanner.get_iso_template(self.__scanner)
| pawelmoll/scannerAPI | scanner/scanner.py | Python | bsd-2-clause | 1,360 |
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from __future__ import unicode_literals
from collections import defaultdict
from functools import partial
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.sql.where import Constraint
from django.forms import ModelForm, ALL_FIELDS
from django.forms.models import (BaseModelFormSet, modelformset_factory, save_instance,
modelform_defines_fields)
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import smart_text
class RenameGenericForeignKeyMethods(RenameMethodsBase):
renamed_methods = (
('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
)
class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)):
"""
Provides a generic relation to any object through content-type/object-id
fields.
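    A sketch of typical usage (the model and its fields are illustrative only,
    not part of this module)::
        class TaggedItem(models.Model):
            tag = models.SlugField()
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey('content_type', 'object_id')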
"""
def __init__(self, ct_field="content_type", fk_field="object_id"):
self.ct_field = ct_field
self.fk_field = fk_field
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# For some reason I don't totally understand, using weakrefs here doesn't work.
signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
# Connect myself as the descriptor for this field
setattr(cls, name, self)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
elif id:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances):
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRelation(ForeignObject):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(
self, to, related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),)
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, to_fields=[],
from_fields=[self.object_id_field_name], **kwargs)
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
self.model._meta.pk)]
def get_reverse_path_info(self):
opts = self.rel.to._meta
target = opts.get_field_by_name(self.object_id_field_name)[0]
return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def get_joining_columns(self, reverse_join=False):
if not reverse_join:
# This error message is meant for the user, and from user
# perspective this is a reverse join along the GenericRelation.
raise ValueError('Joining in reverse direction not allowed.')
return super(GenericRelation, self).get_joining_columns(reverse_join)
def contribute_to_class(self, cls, name):
super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))
def contribute_to_related_class(self, cls, related):
pass
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Returns the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
contenttype_pk = self.get_content_type().pk
cond = where_class()
cond.add((Constraint(remote_alias, field.column, field), 'exact', contenttype_pk), 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.rel.to._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name:
ContentType.objects.db_manager(using).get_for_model(self.model).pk,
"%s__in" % self.object_id_field_name:
[obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance)
join_cols = self.field.get_joining_columns(reverse_join=True)[0]
manager = RelatedManager(
model = rel_model,
instance = instance,
source_col_name = qn(join_cols[0]),
target_col_name = qn(join_cols[1]),
content_type = content_type,
content_type_field_name = self.field.content_type_field_name,
object_id_field_name = self.field.object_id_field_name,
prefetch_cache_name = self.field.attname,
)
return manager
def __set__(self, instance, value):
manager = self.__get__(instance)
manager.clear()
for obj in value:
manager.add(obj)
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, instance=None, symmetrical=None,
source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None,
prefetch_cache_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.prefetch_cache_name = prefetch_cache_name
self.pk_val = self.instance._get_pk_val()
self.core_filters = {
'%s__pk' % content_type_field_name: content_type.id,
'%s__exact' % object_id_field_name: instance._get_pk_val(),
}
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances):
db = self._db or router.db_for_read(self.model, instance=instances[0])
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name:
set(obj._get_pk_val() for obj in instances)
}
qs = super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**query)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (qs,
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs):
db = router.db_for_write(self.model, instance=self.instance)
for obj in objs:
obj.delete(using=db)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.model, instance=self.instance)
for obj in self.all():
obj.delete(using=db)
clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
def __init__(self, field, to, related_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(field, to, related_name, limit_choices_to)
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(self.instance),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join((opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name,
))
def save_new(self, form, commit=True):
kwargs = {
self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
self.ct_fk_field.get_attname(): self.instance.pk,
}
new_obj = self.model(**kwargs)
return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None,
formfield_callback=None, validate_max=False):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
    You must provide ``ct_field`` and ``fk_field`` if they are different from
    the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, request, obj=None, **kwargs):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# GenericInlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"formset": self.formset,
"extra": self.extra,
"can_delete": can_delete,
"can_order": False,
"fields": fields,
"max_num": self.max_num,
"exclude": exclude
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = ALL_FIELDS
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
| postrational/django | django/contrib/contenttypes/generic.py | Python | bsd-3-clause | 20,759 |
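# --- Illustrative usage sketch (not part of the module above) ---
# Shows how ReverseGenericRelatedObjectsDescriptor and the generic related
# manager defined above surface in user code. Assumes a configured Django
# project of the same vintage as the module; the TaggedItem and Bookmark
# models are hypothetical and exist only for this example.
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models

class TaggedItem(models.Model):
    tag = models.SlugField()
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')

class Bookmark(models.Model):
    url = models.URLField()
    # 'tags' is exposed through ReverseGenericRelatedObjectsDescriptor.
    tags = generic.GenericRelation(TaggedItem)

# With a database configured:
#   bookmark = Bookmark.objects.create(url='https://example.com')
#   bookmark.tags.create(tag='django')   # GenericRelatedObjectManager.create()
#   bookmark.tags.all()                  # filtered by content_type + object_id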
import mango
from ._PValueTest import *
from ._GeneralisedChiSquaredTest import *
__all__ = [s for s in dir() if not s.startswith('_')]
if mango.haveRestricted:
from ._fmmTest import *
from ._BinnedGmmEmTest import *
from ._SummedBinnedGmmEmTest import *
| pymango/pymango | misc/python/mango/fmmTest/__init__.py | Python | bsd-2-clause | 316 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
sys.path.append(os.path.realpath('..'))
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from chapter_05 import data_utils
from chapter_05 import seq2seq_model
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("en_vocab_size", 40000, "Size of the english vocabulary.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 40000, "Size of the french vocabulary.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_string("train_dir", os.path.realpath('../datasets/WMT'), "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
    max_size: maximum number of lines to read, all others will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.initialize_all_variables())
return model
def train():
wmt = data_utils.prepare_wmt_dataset()
# en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_dataset()
with tf.Session() as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(wmt.en_dev_ids_path, wmt.fr_dev_ids_path)
train_set = read_data(wmt.en_train_ids_path, wmt.fr_train_ids_path, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
            # the size of the i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.exp(loss) if loss < 300 else float('inf')
print("global step %d learning rate %.4f step-time %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
dev_set, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
sys.stdout.flush()
train()
| mlwithtf/mlwithtf | chapter_05/translate.py | Python | apache-2.0 | 9,841 |
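# --- Illustrative sketch (not part of translate.py above) ---
# Demonstrates the bucket-selection logic used in train(): a bucket is
# sampled with probability proportional to how much training data it holds.
# The bucket sizes below are made-up numbers for the example only.
import numpy as np

train_bucket_sizes = [1000, 3000, 4000, 2000]
train_total_size = float(sum(train_bucket_sizes))
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                       for i in range(len(train_bucket_sizes))]
# -> [0.1, 0.4, 0.8, 1.0]: cumulative shares of the training data.

random_number_01 = np.random.random_sample()
bucket_id = min(i for i in range(len(train_buckets_scale))
                if train_buckets_scale[i] > random_number_01)
print("picked bucket %d for draw %.2f" % (bucket_id, random_number_01))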
import pytest
import smartsheet
@pytest.mark.usefixtures("smart_setup")
class TestCrossSheetReferences:
def test_create_cross_sheet_reference(self, smart_setup):
smart = smart_setup['smart']
xref = smart.models.CrossSheetReference()
xref.source_sheet_id = smart_setup['sheet_b'].id
xref.start_column_id = smart_setup['sheet_b'].columns[0].id
xref.end_column_id = smart_setup['sheet_b'].columns[0].id
action = smart.Sheets.create_cross_sheet_reference(smart_setup['sheet'].id, xref)
assert action.message == 'SUCCESS'
def test_list_cross_sheet_references(self, smart_setup):
smart = smart_setup['smart']
action = smart.Sheets.list_cross_sheet_references(smart_setup['sheet'].id)
assert isinstance(action.data[0], smart.models.CrossSheetReference)
def test_get_cross_sheet_references(self, smart_setup):
smart = smart_setup['smart']
action = smart.Sheets.get_sheet(smart_setup['sheet'].id, include='crossSheetReferences')
assert isinstance(action, smart.models.Sheet)
assert len(action.cross_sheet_references) == 1
action = smart.Sheets.get_cross_sheet_reference(smart_setup['sheet'].id, action.cross_sheet_references[0].id)
assert isinstance(action, smart.models.CrossSheetReference)
| smartsheet-platform/smartsheet-python-sdk | tests/integration/test_cross_sheet_references.py | Python | apache-2.0 | 1,330 |
"""Analyzes an MP3 file, gathering statistics and looking for errors."""
import cStringIO
import hashlib
import os
from chirp.common import mp3_frame
# Files with fewer than this many MPEG frames will be rejected as
# invalid. 100 frames is about 2.6s of audio.
_MINIMUM_FRAMES = 100
_MINIMUM_REASONABLE_FILE_SIZE = 100<<10 # Files should be larger than 100k...
_MAXIMUM_REASONABLE_FILE_SIZE = 20<<20 # ...and smaller than 20MB.
class InvalidFileError(Exception):
"""Raised when a file appears to be invalid or somehow corrupted."""
# TODO(trow): Some of the validity checks in this function might be
# too strict.
def analyze(file_obj, au_file, compute_fingerprint=True, get_payload=True):
"""Populate an AudioFile object with information extracted from a file.
Args:
file_obj: A file-like object.
au_file: An AudioFile object to store the results of the analysis in.
compute_fingerprint: If False, do not compute a fingerprint.
Returns:
The same AudioFile object that was passed in as au_file, which
should now have several fields set.
Raises:
InvalidFileError: if the file appears to be corrupted.
"""
au_file.frame_count = 0
au_file.frame_size = 0
au_file.duration_ms = 0
sha1_calc = hashlib.sha1() # unused if compute_fingerprint is False.
payload = cStringIO.StringIO() # unused if get_payload is False.
bit_rate_kbps_sum = 0
expected_hdr = None
first_bit_rate_kbps = None
is_vbr = False
for hdr, data_buffer in mp3_frame.split(file_obj):
if hdr is None:
continue
au_file.frame_count += 1
au_file.frame_size += len(data_buffer)
au_file.duration_ms += hdr.duration_ms
if compute_fingerprint:
sha1_calc.update(data_buffer)
if get_payload:
payload.write(data_buffer)
# If we've seen a valid header previously, make sure that all of the
# fields that should match do actually match.
if expected_hdr:
if not hdr.match(expected_hdr):
raise InvalidFileError(
"Bad header: found %s, expected %s (path=%s)" % (
hdr, expected_hdr, au_file.path))
# Keep track of if this is a variable bit-rate file.
if hdr.bit_rate_kbps != first_bit_rate_kbps:
is_vbr = True
# Add this frame's bit rate to our sum; we will use this to compute
# the average bit rate.
bit_rate_kbps_sum += hdr.bit_rate_kbps
# If this is the first header we've seen, make a copy and then blank
# out the fields that can vary. All future headers are expected to
# match this template.
if expected_hdr is None:
expected_hdr = hdr
first_bit_rate_kbps = expected_hdr.bit_rate_kbps
expected_hdr.bit_rate_kbps = None # Might be a VBR file.
expected_hdr.padding = None # Not all frames are padded.
expected_hdr.frame_size = None
# You'd think that this would be constant, but MP3s
# encountered in the wild prove otherwise.
expected_hdr.protected = None
if au_file.frame_count < _MINIMUM_FRAMES:
raise InvalidFileError("Found only %d MPEG frames"
% au_file.frame_count)
# Add the bit rate back into the template header, then return it.
# If this is a VBR file, use the average bit rate instead.
if is_vbr:
expected_hdr.bit_rate_kbps = (
float(bit_rate_kbps_sum) / au_file.frame_count)
else:
expected_hdr.bit_rate_kbps = first_bit_rate_kbps
    # Finish populating and then return the AudioFile object.
au_file.mp3_header = expected_hdr
# Round the duration down to an integral number of microseconds.
au_file.duration_ms = int(au_file.duration_ms)
if compute_fingerprint:
au_file.fingerprint = sha1_calc.hexdigest()
if get_payload:
au_file.payload = payload.getvalue()
return au_file
def sample_and_analyze(au_file, mp3_path_list):
"""Pick a representative file from a list of filenames and analyze it.
Args:
mp3_path_list: A list of paths to MP3 files.
Returns:
A representative MP3 header from a file whose size
      is approximately equal to the median of those in the list.
"""
if not mp3_path_list:
return None
sizes_and_paths = sorted((os.stat(path).st_size, path)
for path in mp3_path_list)
# Find the median element.
size, sample_path = sizes_and_paths[len(sizes_and_paths)/2]
# Complain if file is < 100k or > 20M
if (size < _MINIMUM_REASONABLE_FILE_SIZE
or size > _MAXIMUM_REASONABLE_FILE_SIZE):
raise InvalidFileError("Sample file has bad size: %s %d" % (
sample_path, size))
f_in = open(sample_path)
try:
analyze(f_in, au_file, compute_fingerprint=False)
finally:
f_in.close()
# We return only the MP3 header, since the rest of the au_file
# information is tied to that specific file.
return au_file.mp3_header
| chirpradio/chirpradio-machine | chirp/library/analyzer.py | Python | apache-2.0 | 5,178 |
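# --- Illustrative usage sketch (not part of analyzer.py above) ---
# Typical call shape for analyze(). The real AudioFile class lives elsewhere
# in the chirp library; a bare stand-in is used here so the flow is visible.
# The import path and the file path are assumptions for this example.
from chirp.library import analyzer

class StubAudioFile(object):
    """Minimal stand-in: analyze() only assigns attributes and reads .path
    when building error messages."""
    path = "/tmp/example.mp3"

au_file = StubAudioFile()
with open(au_file.path, "rb") as f_in:
    analyzer.analyze(f_in, au_file, compute_fingerprint=True)
print au_file.frame_count, au_file.duration_ms, au_file.fingerprint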
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-15 20:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ClinSci', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Sample_metrics',
new_name='Ngs_test_metrics',
),
migrations.RenameField(
model_name='ngs_test',
old_name='virtual_panel_phenotype',
new_name='capture_profile',
),
migrations.AddField(
model_name='sample',
name='capture_number',
field=models.CharField(blank=True, max_length=10),
),
migrations.AddField(
model_name='sample',
name='comments',
field=models.CharField(blank=True, max_length=200),
),
migrations.AddField(
model_name='sample',
name='ex_number',
field=models.CharField(blank=True, max_length=200),
),
migrations.AddField(
model_name='sample',
name='gender',
field=models.CharField(blank=True, max_length=200),
),
migrations.AddField(
model_name='sample',
name='mody_number',
field=models.CharField(blank=True, max_length=200),
),
migrations.RemoveField(
model_name='sample',
name='batch',
),
migrations.AddField(
model_name='sample',
name='batch',
field=models.ManyToManyField(to='ClinSci.Batch'),
),
]
| rdemolgen/NGS-LIMS | ClinSci/migrations/0002_auto_20160715_2026.py | Python | mit | 1,675 |
"""Leave one out coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'hbghhy'
class LeaveOneOutEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""Leave one out coding for categorical features.
This is very similar to target encoding but excludes the current row's
target when calculating the mean target for a level to reduce the effect
of outliers.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing
data are untouched). Sigma gives the standard deviation (spread or "width") of the normal distribution.
The optimal value is commonly between 0.05 and 0.6. The default is to not add noise, but that leads
to significantly suboptimal results.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = LeaveOneOutEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Strategies to encode categorical variables with many categories, from
https://www.kaggle.com/c/caterpillar-tube-pricing/discussion/15748#143154.
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, sigma=None):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.random_state = random_state
self.sigma = sigma
self.feature_names = None
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self.fit_leave_one_out(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self.transform_leave_one_out(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_leave_one_out(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
self._mean = y.mean()
return {col: self.fit_column_map(X[col], y) for col in cols}
def fit_column_map(self, series, y):
category = pd.Categorical(series)
categories = category.categories
codes = category.codes.copy()
codes[codes == -1] = len(categories)
categories = np.append(categories, np.nan)
return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
result = y.groupby(codes).agg(['sum', 'count'])
return result.rename(return_map)
def transform_leave_one_out(self, X_in, y, mapping=None):
"""
Leave one out encoding uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train], dtype=unique_train.dtype)
is_nan = X[col].isnull()
is_unknown_value = X[col].isin(unseen_values.dropna().astype(object))
if X[col].dtype.name == 'category': # Pandas 0.24 tries hard to preserve categorical data type
X[col] = X[col].astype(str)
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = (colmap['sum'] / colmap['count']).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else: # Replace level with its mean target, calculated excluding this row's target
# The y (target) mean for this level is normally just the sum/count;
# excluding this row's y, it's (sum - y) / (count - 1)
level_means = (X[col].map(colmap['sum']) - y) / (X[col].map(colmap['count']) - 1)
# The 'where' fills in singleton levels (count = 1 -> div by 0) with the global mean
X[col] = level_means.where(X[col].map(colmap['count'][level_notunique]).notnull(), self._mean)
if self.handle_unknown == 'value':
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError('Must fit data first. Affected feature names are not known before.')
else:
return self.feature_names
| scikit-learn-contrib/categorical-encoding | category_encoders/leave_one_out.py | Python | bsd-3-clause | 11,063 |
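# --- Illustrative sketch (not part of the encoder above) ---
# The core leave-one-out arithmetic from transform_leave_one_out(), worked on
# a tiny hand-made frame: each row is encoded as the mean target of its level
# computed excluding that row's own target. The data values are made up.
import pandas as pd

X = pd.DataFrame({'col': ['a', 'a', 'a', 'b', 'b']})
y = pd.Series([1.0, 0.0, 1.0, 0.0, 0.0])

colmap = y.groupby(X['col']).agg(['sum', 'count'])
level_means = (X['col'].map(colmap['sum']) - y) / (X['col'].map(colmap['count']) - 1)
print(level_means.tolist())
# row 0 ('a', y=1.0): (2.0 - 1.0) / (3 - 1) = 0.5, and so on for the others.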
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that all source files contain the necessary copyright boilerplate
# snippet.
# This is based on existing work
# https://github.com/kubernetes/test-infra/blob/master/hack
# /verify_boilerplate.py
# Please note that this file was generated from
# [terraform-google-module-template](https://github.com/terraform-google-modules/terraform-google-module-template).
# Please make sure to contribute relevant changes upstream!
from __future__ import print_function
import argparse
import glob
import os
import re
import sys
def get_args():
"""Parses command line arguments.
Configures and runs argparse.ArgumentParser to extract command line
arguments.
Returns:
An argparse.Namespace containing the arguments parsed from the
command line
"""
parser = argparse.ArgumentParser()
parser.add_argument("filenames",
help="list of files to check, "
"all files if unspecified",
nargs='*')
rootdir = os.path.abspath(os.getcwd())
parser.add_argument(
"--rootdir",
default=rootdir,
help="root directory to examine")
default_boilerplate_dir = os.path.join(os.path.dirname(__file__),
"boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
return parser.parse_args()
def get_refs(ARGS):
"""Converts the directory of boilerplate files into a map keyed by file
extension.
Reads each boilerplate file's contents into an array, then adds that array
to a map keyed by the file extension.
Returns:
A map of boilerplate lines, keyed by file extension. For example,
boilerplate.py.txt would result in the k,v pair {".py": py_lines} where
py_lines is an array containing each line of the file.
"""
refs = {}
# Find and iterate over the absolute path for each boilerplate template
for path in glob.glob(os.path.join(
ARGS.boilerplate_dir,
"boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
# pylint: disable=too-many-locals
def has_valid_header(filename, refs):
"""Test whether a file has the correct boilerplate header.
Tests each file against the boilerplate stored in refs for that file type
(based on extension), or by the entire filename (eg Dockerfile, Makefile).
Some heuristics are applied to remove build tags and shebangs, but little
variance in header formatting is tolerated.
Args:
filename: A string containing the name of the file to test
refs: A map of boilerplate headers, keyed by file extension
Returns:
True if the file has the correct boilerplate header, otherwise returns
False.
"""
try:
with open(filename, 'r') as fp: # pylint: disable=invalid-name
data = fp.read()
except IOError:
print(filename)
return False
basename = os.path.basename(filename)
extension = get_file_extension(filename)
if extension:
ref = refs[extension]
else:
ref = refs[basename]
data = data.splitlines()
pattern_len = len(ref)
# if our test file is smaller than the reference it surely fails!
if pattern_len > len(data):
return False
copyright_regex = re.compile("Copyright 20\\d\\d")
substitute_string = "Copyright YYYY"
copyright_is_found = False
j = 0
for datum in data:
# if it's a copyright line
if not copyright_is_found and copyright_regex.search(datum):
copyright_is_found = True
# replace the actual year (e.g. 2019) with "YYYY" placeholder
# used in a boilerplate
datum = copyright_regex.sub(substitute_string, datum)
if datum == ref[j]:
j = j + 1
else:
j = 0
if j == pattern_len:
return copyright_is_found
return copyright_is_found and j == pattern_len
def get_file_extension(filename):
"""Extracts the extension part of a filename.
Identifies the extension as everything after the last period in filename.
Args:
filename: string containing the filename
Returns:
A string containing the extension in lowercase
"""
return os.path.splitext(filename)[1].split(".")[-1].lower()
# These directories will be omitted from header checks
SKIPPED_DIRS = [
'Godeps', 'third_party', '_gopath', '_output',
'.git', 'vendor', '__init__.py', 'node_modules'
]
def normalize_files(files):
"""Extracts the files that require boilerplate checking from the files
argument.
A new list will be built. Each path from the original files argument will
be added unless it is within one of SKIPPED_DIRS. All relative paths will
be converted to absolute paths by prepending the root_dir path parsed from
the command line, or its default value.
Args:
files: a list of file path strings
Returns:
        A modified copy of the files list where any path in a skipped
directory is removed, and all paths have been made absolute.
"""
newfiles = []
for pathname in files:
if any(x in pathname for x in SKIPPED_DIRS):
continue
newfiles.append(pathname)
for idx, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[idx] = os.path.join(ARGS.rootdir, pathname)
return newfiles
def get_files(extensions, ARGS):
"""Generates a list of paths whose boilerplate should be verified.
If a list of file names has been provided on the command line, it will be
treated as the initial set to search. Otherwise, all paths within rootdir
will be discovered and used as the initial set.
Once the initial set of files is identified, it is normalized via
normalize_files() and further stripped of any file name whose extension is
not in extensions.
Args:
extensions: a list of file extensions indicating which file types
should have their boilerplate verified
Returns:
A list of absolute file paths
"""
files = []
if ARGS.filenames:
files = ARGS.filenames
else:
for root, dirs, walkfiles in os.walk(ARGS.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for dpath in SKIPPED_DIRS:
if dpath in dirs:
dirs.remove(dpath)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = get_file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def main(args):
"""Identifies and verifies files that should have the desired boilerplate.
Retrieves the lists of files to be validated and tests each one in turn.
If all files contain correct boilerplate, this function terminates
normally. Otherwise it prints the name of each non-conforming file and
exists with a non-zero status code.
"""
refs = get_refs(args)
filenames = get_files(refs.keys(), args)
nonconforming_files = []
for filename in filenames:
if not has_valid_header(filename, refs):
nonconforming_files.append(filename)
if nonconforming_files:
print('%d files have incorrect boilerplate headers:' % len(
nonconforming_files))
for filename in sorted(nonconforming_files):
print(os.path.relpath(filename, args.rootdir))
sys.exit(1)
if __name__ == "__main__":
ARGS = get_args()
main(ARGS)
| GoogleCloudPlatform/cloud-foundation-toolkit | infra/build/developer-tools/build/verify_boilerplate/verify_boilerplate.py | Python | apache-2.0 | 8,823 |
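# --- Illustrative invocation sketch (not part of the script above) ---
# Typical ways to run the checker; the flags map to get_args() above and the
# paths are placeholders for the example.
#
#   python verify_boilerplate.py                          # scan the current tree
#   python verify_boilerplate.py --rootdir=/path/to/repo
#   python verify_boilerplate.py path/to/setup.py path/to/main.tf
#
# When any file is missing the expected header, the offending paths are
# printed and the script exits with a non-zero status code.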
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2013 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Trivial example of how to update a DNS record for a zone. As this uses the
internal record ID (which needs to be resolved with rec_load_all() first), this
is a chained double-lookup example showing how to use it with a record name. See:
http://www.cloudflare.com/docs/client-api.html#s5.2
'''
import os, sys
# make sure our local copy of txcloudflare is in sys.path
PATH_TO_TXCF = '../txcloudflare/'
try:
import txcloudflare
except ImportError:
txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF))
if txcfpath not in sys.path:
sys.path.insert(0, txcfpath)
from twisted.internet import reactor
import txcloudflare
# shuffle these up a bit in this example to increase their scope
email_address = os.environ.get('TXCFEMAIL', '')
api_token = os.environ.get('TXCFAPI', '')
domain_name = os.environ.get('TXCFDOMAIN', '')
cloudflare = txcloudflare.client_api(email_address, api_token)
def got_record_list(response):
'''
'response' is a txcloudflare.response.Response() instance.
'''
print '< got first response (record id lookup)'
target_name = 'subdomain.' + domain_name
target_type = 'MX'
target = {}
for d in response.data:
print d['type'], d['name'], d['display_content']
if d['name'] == target_name and d['type'] == target_type:
target = d
def edited_record(response):
print '< got second response (edited record)'
for k,v in response.data.items():
print k, '->', v
reactor.stop()
if target:
# send the second request now we have the record id - parameters for rec_edit() other
# than 'id' are the same as for rec_new, see examples/record_new.py for details
update_to = 'mx.differentserver.com'
print '> requesting update of "{0}" record "{1}" changing to "{2}" id: {3}'.format(target['type'], target['name'], update_to, target['rec_id'])
cloudflare.rec_edit(
zone=domain_name,
record_id=target['rec_id'], # only new parameter
name=target['name'],
record_type=target['type'],
ttl=target['ttl'],
priority=target['prio'],
content=update_to
).addCallback(edited_record).addErrback(got_error)
else:
        # no matching record found to edit
print '> no record matches "{0}" record "{1}"'.format(target['type'], target['name'])
reactor.stop()
def got_error(error):
'''
'error' is a twisted.python.failure.Failure() instance wrapping one of
the exceptions in txcloudflare.errors. The exceptions return the
CloudFlare error code, a plain text string and a response object
(txcloudflare.response.Response). The response object has a 'request'
    parameter if you need to look at the request that generated the error.
'''
print '< error'
print error.printTraceback()
reactor.stop()
if __name__ == '__main__':
print '> listing all records for zone: {0}'.format(domain_name)
cloudflare.rec_load_all(zone=domain_name).addCallback(got_record_list).addErrback(got_error)
reactor.run()
'''
EOF
'''
| meeb/txcloudflare | examples/record_edit.py | Python | apache-2.0 | 3,824 |
"""
Premium Question
Straightforward
"""
__author__ = 'Daniel'
class Solution(object):
def generatePossibleNextMoves(self, s):
"""
:type s: str
:rtype: List[str]
"""
ret = []
for i in xrange(len(s)-1):
if s[i:i+2] == "++":
ret.append(s[:i]+"--"+s[i+2:])
return ret | algorhythms/LeetCode | 293 Flip Game.py | Python | mit | 355 |
import json
import requests
url = "https://edamam-recipe-search-and-diet-v1.p.mashape.com/search?_app_id=2a7c2865&_app_key=9a3f87c1707e67f965284ca3eb613dae&q=korean"
headers = {
"X-Mashape-Key": "GgV2guTBbhmsh0sMA33KSrcMjuyMp1EqLsPjsnZlRHycWvV5Pt",
"Accept": "application/json"
}
r = requests.get(url, headers=headers)
j = json.loads(r.text)
print j['hits'][0]['recipe']['ingredientLines']
for i in j['hits'][0]['recipe']['ingredientLines'] :
print i
| jerrrytan/bitcamp | apis/mashapeAPI.py | Python | mit | 461 |
"""
Portable file locking utilities.
Based partially on example by Jonathan Feignberg <[email protected]> in the Python
Cookbook, licensed under the Python Software License.
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
Example Usage::
>>> from django.core.files import locks
>>> f = open('./file', 'wb')
>>> locks.lock(f, locks.LOCK_EX)
>>> f.write('Django')
>>> f.close()
"""
__all__ = ('LOCK_EX','LOCK_SH','LOCK_NB','lock','unlock')
system_type = None
try:
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
__overlapped = pywintypes.OVERLAPPED()
system_type = 'nt'
except (ImportError, AttributeError):
pass
try:
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
system_type = 'posix'
except (ImportError, AttributeError):
pass
def fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return hasattr(f, 'fileno') and f.fileno() or f
if system_type == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(fd(file))
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(fd(file))
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
elif system_type == 'posix':
def lock(file, flags):
fcntl.flock(fd(file), flags)
def unlock(file):
fcntl.flock(fd(file), fcntl.LOCK_UN)
else:
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = None
# Dummy functions that don't do anything.
def lock(file, flags):
pass
def unlock(file):
pass
| Shrews/PyGerrit | webapp/django/core/files/locks.py | Python | apache-2.0 | 1,791 |
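# --- Illustrative usage sketch (not part of locks.py above) ---
# Same flow as the module docstring, extended with unlock and cleanup so the
# lock is always released. Assumes the module is importable as
# django.core.files.locks; on platforms without locking support the calls
# degrade to no-ops.
from django.core.files import locks

f = open('./file', 'wb')
try:
    locks.lock(f, locks.LOCK_EX)   # exclusive lock while writing
    f.write('Django')
finally:
    locks.unlock(f)
    f.close()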
# -*- coding: utf-8 -*-
#
# abx_numpy documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 21 01:57:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'abx_numpy'
copyright = u'2015, Roland Thiolliere'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'abx_numpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'abx_numpy.tex', u'abx\\_numpy Documentation',
u'Roland Thiolliere', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'abx_numpy', u'abx_numpy Documentation',
[u'Roland Thiolliere'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'abx_numpy', u'abx_numpy Documentation',
u'Roland Thiolliere', 'abx_numpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bootphon/abx_numpy | docs/source/conf.py | Python | gpl-3.0 | 8,322 |
try:
import unittest2 as unittest
except ImportError:
import unittest
from testrail.casetype import CaseType
class TestCaseType(unittest.TestCase):
def setUp(self):
self.casetype = CaseType(
{
"id": 1,
"is_default": False,
"name": "Automated"
}
)
def test_get_id_type(self):
self.assertEqual(type(self.casetype.id), int)
def test_get_id(self):
self.assertEqual(self.casetype.id, 1)
def test_get_is_default_type(self):
self.assertEqual(type(self.casetype.is_default), bool)
def test_get_is_default(self):
self.assertEqual(self.casetype.is_default, False)
def test_get_name_type(self):
self.assertEqual(type(self.casetype.name), str)
def test_get_name(self):
self.assertEqual(self.casetype.name, 'Automated')
| travispavek/testrail | tests/test_casetype.py | Python | mit | 879 |