code | repo_name | path | language | license | size
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-30 19:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0010_auto_20171030_1927'),
]
operations = [
migrations.AlterField(
model_name='alertausuario',
name='periodicidad',
field=models.FloatField(default=300.0),
),
]
| foxcarlos/decimemijobot | bot/respaldo/0011_auto_20171030_1928.py | Python | gpl-3.0 | 461 |
import os
import sys
import test_sys.gen_util as util
class test_runner_info:
test_type = "Test"
extra_includes = ["test-sys/run-test.hh"]
extra_globals = []
main_function_name = "run_tests"
test_function_name = "run_test"
args = [' const bool silent = find_silent_flag(argc, argv);',]
def write_function_call(self, out, func, file_name, max_width):
out.write(' test::run_test(%s, "%s", %d, numFailed, silent);\n'
% (func, file_name, max_width))
class bench_runner_info:
test_type = "Bench"
extra_includes = ["test-sys/run-bench.hh"]
extra_globals = ["std::vector<Measure> BENCH_MEASURES"]
main_function_name = "run_benchmarks"
test_function_name = "run_bench"
args = [' const std::string benchmarkName = find_test_name(argc, argv);']
def write_function_call(self, out, func, file_name, max_width):
out.write(' if (benchmarkName.empty() || benchmarkName == "%s"){\n' % file_name)
out.write(' run_bench(%s, "%s");\n'
% (func, file_name))
out.write(' }\n')
class image_runner_info:
test_type = "Image"
extra_includes = ["test-sys/run-image.hh"]
extra_globals = []
main_function_name = "run_image_tests"
test_function_name = "run_image"
args = [' const std::string testName = find_test_name(argc, argv);']
def write_function_call(self, out, func, file_name, max_width):
out.write(' if (testName.empty() || testName == "%s"){\n' % file_name)
out.write(' run_image(%s, "%s");\n'
% (func, file_name))
out.write(' }\n')
def gen_runner(root_dir, out_file, info):
all_files = [f for f in os.listdir(root_dir) if (
f.endswith(".cpp") and
not f.startswith('.'))]
files = [f for f in all_files if f != 'stubs.cpp' and f != 'main.cpp']
if not util.need_generate_single(out_file, __file__):
if not util.need_generate(root_dir, out_file, files):
print("* %s-runner up to date." % info.test_type)
return
print("* Generating %s-runner" % info.test_type)
out_dir = os.path.dirname(out_file)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
max_width = max([len(f) for f in files])
for file_name in files:
util.check_file(root_dir, file_name)
# If there is a user-supplied main.cpp, use "run_tests" as the
# test function to allow calling from there.
# Otherwise, define the main function.
main_function_name = (info.main_function_name
if 'main.cpp' in all_files else
"main")
with open(out_file, 'w') as out:
out.write('// Generated by %s\n' % os.path.basename(__file__))
out.write('#include <iostream>\n')
out.write('#include <iomanip>\n')
out.write('#include <sstream>\n')
out.write('#include <vector>\n')
out.write('#include "test-sys/test-sys.hh"\n')
for include in info.extra_includes:
out.write('#include "%s"\n' % include)
if not sys.platform.startswith('linux'):
# For disabling error dialogs on abort
out.write('#include "windows.h"\n')
out.write('\n')
out.write('namespace test{\n')
out.write('bool TEST_FAILED = false;\n')
out.write('std::stringstream TEST_OUT;\n')
out.write('std::vector<Checkable*> POST_CHECKS;\n')
out.write('int NUM_KNOWN_ERRORS = 0;\n')
out.write('} // namespace\n')
for v in info.extra_globals:
out.write("%s;\n" % v)
out.write('\n')
out.write('TestPlatform get_test_platform(){\n')
if sys.platform.startswith('linux'):
out.write(' return TestPlatform::LINUX;\n')
else:
out.write(' return TestPlatform::WINDOWS;\n')
out.write('}\n')
out.write('\n')
out.write('std::string g_test_name;\n\n')
out.write('void set_test_name(const std::string& name){\n')
out.write(' g_test_name = name;\n')
out.write('}\n')
out.write('\n')
out.write('std::string get_test_name(){\n')
out.write(' return g_test_name;\n')
out.write('}\n')
out.write('\n')
for f in files:
out.write('%s\n' % util.file_name_to_declaration(f))
out.write('\n')
out.write('std::string find_test_name(int argc, char** argv){\n')
out.write(' for (int i = 1; i < argc; i++){\n')
out.write(' if (argv[i][0] != \'-\'){\n')
out.write(' return argv[i];\n')
out.write(' }\n')
out.write(' }\n')
out.write(' return "";\n');
out.write('}\n')
out.write('bool find_silent_flag(int argc, char** argv){\n')
out.write(' for (int i = 1; i < argc; i++){\n')
out.write(' if (argv[i] == std::string("--silent")){\n')
out.write(' return true;\n')
out.write(' }\n')
out.write(' }\n')
out.write(' return false;\n')
out.write('}\n')
if len(info.args) != 0:
out.write('int %s(int argc, char** argv){\n' % main_function_name)
else:
out.write('int %s(int, char**){\n' % main_function_name)
if not sys.platform.startswith('linux'):
out.write(' SetErrorMode(GetErrorMode()|SEM_NOGPFAULTERRORBOX);\n')
out.write(' _set_abort_behavior( 0, _WRITE_ABORT_MSG);\n')
out.write('\n')
for arg in info.args:
out.write("%s\n" % arg)
out.write(' int numFailed = 0;\n')
for f in files:
func = util.file_name_to_function_pointer(f)
info.write_function_call(out, func, f, max_width)
out.write(' return test::print_test_summary(numFailed);\n')
out.write('}\n')
# Create defines.hh
folder = os.path.split(out_file)[0]
with open(os.path.join(folder, 'defines.hh'), 'w') as defs:
if sys.platform.startswith('linux'):
defs.write('#define TEST_PLATFORM_LINUX\n')
else:
defs.write('#define TEST_PLATFORM_WINDOWS\n')
def gen_test_runner(root_dir, out_file):
gen_runner(root_dir, out_file, test_runner_info())
def gen_bench_runner(root_dir, out_file):
gen_runner(root_dir, out_file, bench_runner_info())
def gen_image_runner(root_dir, out_file):
gen_runner(root_dir, out_file, image_runner_info())
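# A minimal usage sketch (the paths here are hypothetical, not from this repo):
#
#   gen_test_runner('tests', 'generated/test-runner.cpp')
#
# would collect every non-hidden .cpp file under tests/ (except stubs.cpp and
# main.cpp) and regenerate the runner entry point only when a source file is
# newer than the existing output.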
| lukas-ke/faint-graphics-editor | test-sys/test_sys/gen_runner.py | Python | apache-2.0 | 6,673 |
from __future__ import annotations
from typing import Any, Dict, List, Optional
import dataclasses
import json
import logging
from ..config import ACResource, Config
from ..utils import dump_yaml, parse_yaml, dump_json
from .dependency import DependencyManager
from .k8sobject import KubernetesObjectScope, KubernetesObject
from .location import LocationManager
@dataclasses.dataclass
class NormalizedResource:
"""
Represents an Ambassador resource emitted after processing fetched data.
"""
object: dict
rkey: Optional[str] = None
@classmethod
def from_data(cls, kind: str, name: str, namespace: Optional[str] = None,
generation: Optional[int] = None, version: str = 'v2',
labels: Optional[Dict[str, Any]] = None,
spec: Optional[Dict[str, Any]] = None, errors: Optional[str] = None,
rkey: Optional[str] = None) -> NormalizedResource:
if rkey is None:
rkey = f'{name}.{namespace}'
ir_obj = {}
if spec:
ir_obj.update(spec)
ir_obj['apiVersion'] = f'getambassador.io/{version}'
ir_obj['kind'] = kind
ir_obj['name'] = name
if namespace is not None:
ir_obj['namespace'] = namespace
if generation is not None:
ir_obj['generation'] = generation
ir_obj['metadata_labels'] = labels or {}
if errors:
ir_obj['errors'] = errors
return cls(ir_obj, rkey)
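# Illustrative call (the kind, name, and spec below are hypothetical):
#
#   NormalizedResource.from_data('Mapping', 'my-mapping', namespace='default',
#                                spec={'prefix': '/api/'})
#
# yields an object with rkey 'my-mapping.default', kind 'Mapping', and
# apiVersion 'getambassador.io/v2' (the default version).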
@classmethod
def from_kubernetes_object(cls, obj: KubernetesObject) -> NormalizedResource:
if obj.gvk.api_group != 'getambassador.io':
raise ValueError(f'Cannot construct resource from non-Ambassador Kubernetes object with API version {obj.gvk.api_version}')
if obj.namespace is None:
raise ValueError(f'Cannot construct resource from Kubernetes object {obj.key} without namespace')
labels = dict(obj.labels)
labels['ambassador_crd'] = f"{obj.name}.{obj.namespace}"
# When creating an Ambassador object from a Kubernetes object, we have to make
# sure that we pay attention to 'errors', which will be set IFF watt's validation
# finds errors.
return cls.from_data(
obj.kind,
obj.name,
errors=obj.get('errors'),
namespace=obj.namespace,
generation=obj.generation,
version=obj.gvk.version,
labels=labels,
spec=obj.spec,
)
@classmethod
def from_kubernetes_object_annotation(cls, obj: KubernetesObject) -> List[NormalizedResource]:
config = obj.annotations.get('getambassador.io/config')
if not config:
return []
def clean_normalize(r: Dict[str, Any]) -> NormalizedResource:
# Annotations should have to pass manual object validation.
r['_force_validation'] = True
if r.get('metadata_labels') is None and obj.labels:
r['metadata_labels'] = obj.labels
if r.get('namespace') is None and obj.scope == KubernetesObjectScope.NAMESPACE:
r['namespace'] = obj.namespace
return NormalizedResource(r, rkey=f'{obj.name}.{obj.namespace}')
return [clean_normalize(r) for r in parse_yaml(config) if r]
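# Sketch of the input this handles (annotation value is illustrative): an
# object annotated with
#
#   getambassador.io/config: |
#     kind: Mapping
#     name: my-mapping
#     prefix: /api/
#
# produces one NormalizedResource per YAML document in the annotation, each
# force-validated and inheriting the object's labels and namespace when those
# are not set explicitly.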
class ResourceManager:
"""
Holder for managed resources before they are processed and emitted as IR.
"""
logger: logging.Logger
aconf: Config
deps: DependencyManager
locations: LocationManager
elements: List[ACResource]
def __init__(self, logger: logging.Logger, aconf: Config, deps: DependencyManager):
self.logger = logger
self.aconf = aconf
self.deps = deps
self.locations = LocationManager()
self.elements = []
@property
def location(self) -> str:
return str(self.locations.current)
def _emit(self, resource: NormalizedResource) -> bool:
obj = resource.object
rkey = resource.rkey
if not isinstance(obj, dict):
# Bug!!
if not obj:
self.aconf.post_error("%s is empty" % self.location)
else:
self.aconf.post_error("%s is not a dictionary? %s" %
(self.location, dump_json(obj, pretty=True)))
return True
if not self.aconf.good_ambassador_id(obj):
self.logger.debug("%s ignoring object with mismatched ambassador_id" % self.location)
return True
if 'kind' not in obj:
# Bug!!
self.aconf.post_error("%s is missing 'kind'?? %s" %
(self.location, dump_json(obj, pretty=True)))
return True
# Is this a pragma object?
if obj['kind'] == 'Pragma':
# Why did I think this was a good idea? [ :) ]
new_source = obj.get('source', None)
if new_source:
# We don't save the old self.filename here, so this change will last until
# the next input source (or the next Pragma).
self.locations.current.filename = new_source
# Don't count Pragma objects, since the user generally doesn't write them.
return False
if not rkey:
rkey = self.locations.current.filename_default('unknown')
if obj['kind'] != 'Service':
# Services are unique and don't get an object count appended to
# them.
rkey = "%s.%d" % (rkey, self.locations.current.ocount)
serialization = dump_yaml(obj, default_flow_style=False)
try:
r = ACResource.from_dict(rkey, rkey, serialization, obj)
self.elements.append(r)
except Exception as e:
self.aconf.post_error(e.args[0])
self.logger.debug("%s PROCESS %s save %s: %s" % (self.location, obj['kind'], rkey, serialization))
return True
def emit(self, resource: NormalizedResource):
if self._emit(resource):
self.locations.current.ocount += 1
def emit_annotated(self, resources: List[NormalizedResource]):
with self.locations.mark_annotated():
for resource in resources:
self.emit(resource)
| datawire/ambassador | python/ambassador/fetch/resource.py | Python | apache-2.0 | 6,311 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('..')
import unittest
from src.book import book
class TestBook(unittest.TestCase):
def test_book(self):
test_1= ["Manager.book[site][hash_key]",
"U001 2016-06-02 22:00~22:00 A",
"U002 2017-08-01 19:00~22:00 A",
"U003 2017-08-02 13:00~17:00 B",
"U004 2017-08-03 15:00~16:00 C",
"U005 2017-08-05 09:00~11:00 D",
""]
self.assertRaises(Exception, book, test_1[0])
self.assertRaises(Exception, book, test_1[1])
book(test_1[2])
book(test_1[3])
book(test_1[4])
book(test_1[5])
book(test_1[6])
if __name__ == '__main__':
unittest.main()
| cwlseu/recipes | pyrecipes/thoughtsworks/test/test_main.py | Python | gpl-3.0 | 662 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Abdul Haleem <[email protected]>
import os
import glob
import re
import platform
import multiprocessing
from avocado import Test
from avocado import main
from avocado.utils import process
from avocado.utils import memory
from avocado.utils.software_manager import SoftwareManager
blocks_hotpluggable = []
mem_path = '/sys/devices/system/memory'
errorlog = ['WARNING: CPU:', 'Oops',
'Segfault', 'soft lockup',
'Unable to handle paging request',
'rcu_sched detected stalls',
'NMI backtrace for cpu',
'Call Trace:', 'WARNING: at',
'INFO: possible recursive locking detected',
'Kernel BUG at', 'Kernel panic - not syncing:',
'double fault:', 'BUG: Bad page state in']
def online(block):
try:
memory.hotplug(block)
except IOError:
print("memory%s : Resource is busy" % block)
def offline(block):
try:
memory.hotunplug(block)
except IOError:
print("memory%s : Resource is busy" % block)
def get_hotpluggable_blocks(path):
mem_blocks = []
for mem_blk in glob.glob(path):
block = re.findall("\d+", os.path.basename(mem_blk))[0]
block = re.sub(r'^\s*$', '', block)
if memory.is_hot_pluggable(block):
mem_blocks.append(block)
return mem_blocks
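# Illustrative: get_hotpluggable_blocks('%s/memory*' % mem_path) scans entries
# such as /sys/devices/system/memory/memory32 and returns the numeric suffixes
# (e.g. ['32', '33']) of the blocks that memory.is_hot_pluggable() accepts.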
def collect_dmesg(test_object):
test_object.whiteboard = process.system_output("dmesg")
class memstress(Test):
'''
Stress test to exercise the memory component
This test performs memory hotunplug/hotplug tests with below scenarios:
1. hotunplug one by one in a loop for all
2. Toggle memory blocks by making off/on in a loop
3. hot unplug % of memory for different ratios
4. dlpar memory hotplug using drmgr
5. shared resource : dlpar in CMO mode
6. try hotplug each different numa node memblocks
7. run stress memory in background
:avocado: tags=memory,privileged
'''
def setUp(self):
if not memory.check_hotplug():
self.cancel("UnSupported : memory hotplug not enabled\n")
sm = SoftwareManager()
if not sm.check_installed('stress') and not sm.install('stress'):
self.cancel(' stress package is needed for the test to be run')
self.tests = self.params.get('test', default='all')
self.iteration = int(self.params.get('iteration', default='1'))
self.stresstime = int(self.params.get('stresstime', default='10'))
self.vmcount = int(self.params.get('vmcount', default='4'))
self.iocount = int(self.params.get('iocount', default='4'))
self.memratio = self.params.get('memratio', default=None)
self.blocks_hotpluggable = get_hotpluggable_blocks(
'%s/memory*' % mem_path)
@staticmethod
def hotunplug_all(blocks):
for block in blocks:
if memory._check_memory_state(block):
offline(block)
@staticmethod
def hotplug_all(blocks):
for block in blocks:
if not memory._check_memory_state(block):
online(block)
@staticmethod
def __clear_dmesg():
process.run("dmesg -c", sudo=True)
@staticmethod
def __error_check():
ERROR = []
logs = process.system_output("dmesg -Txl 1,2,3,4").splitlines()
for error in errorlog:
for log in logs:
if error in log:
ERROR.append(log)
return "\n".join(ERROR)
@staticmethod
def __is_auto_online():
with open('%s/auto_online_blocks' % mem_path, 'r') as auto_file:
if auto_file.read() == 'online\n':
return True
return False
def run_stress(self):
mem_free = memory.meminfo.MemFree.mb / 2
cpu_count = int(multiprocessing.cpu_count()) / 2
process.run("stress --cpu %s --io %s --vm %s --vm-bytes %sM --timeout %ss" %
(cpu_count, self.iocount, self.vmcount, mem_free, self.stresstime), ignore_status=True, sudo=True, shell=True)
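# The command assembled above looks like this (numbers illustrative, derived
# from half the free memory in MB and half the CPU count):
#   stress --cpu 8 --io 4 --vm 4 --vm-bytes 2048M --timeout 10s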
def test(self):
if os.path.exists("%s/auto_online_blocks" % mem_path):
if not self.__is_auto_online():
self.hotplug_all(self.blocks_hotpluggable)
if 'all' in self.tests:
tests = ['hotplug_loop',
'hotplug_toggle',
'hotplug_ratio',
'dlpar_mem_hotplug',
'hotplug_per_numa_node']
else:
tests = self.tests.split()
for method in tests:
self.log.info("\nTEST: %s\n", method)
self.__clear_dmesg()
getattr(self, method)()
msg = self.__error_check()
if msg:
collect_dmesg(self)
self.log.error('Test: %s. ERROR Message: %s', method, msg)
self.log.info("\nEND: %s\n", method)
def hotplug_loop(self):
self.log.info("\nTEST: hotunplug and hotplug in a loop\n")
for _ in range(self.iteration):
self.log.info("\nhotunplug all memory\n")
self.hotunplug_all(self.blocks_hotpluggable)
self.run_stress()
self.log.info("\nReclaim back memory\n")
self.hotplug_all(self.blocks_hotpluggable)
def hotplug_toggle(self):
self.log.info("\nTEST: Memory toggle\n")
for _ in range(self.iteration):
for block in self.blocks_hotpluggable:
offline(block)
self.log.info("memory%s block hotunplugged" % block)
self.run_stress()
online(block)
self.log.info("memory%s block hotplugged" % block)
def hotplug_ratio(self):
if not self.memratio:
self.memratio = ['25', '50', '75', '100']
for ratio in self.memratio:
target = 0
self.log.info("\nTEST : Hotunplug %s%% of memory\n" % ratio)
num = len(self.blocks_hotpluggable) * int(ratio)
if num % 100:
target = (num / 100) + 1
else:
target = num / 100
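# Worked example (illustrative): with 10 hotpluggable blocks and ratio '25',
# num = 10 * 25 = 250; since 250 % 100 != 0, target = 250 / 100 + 1 = 3 blocks
# get offlined (integer division under Python 2).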
for block in self.blocks_hotpluggable:
if target > 0:
offline(block)
target -= 1
self.log.info("memory%s block offline" % block)
self.run_stress()
self.log.info("\nReclaim all memory\n")
self.hotplug_all(self.blocks_hotpluggable)
def dlpar_mem_hotplug(self):
if 'ppc' in platform.processor() and 'PowerNV' not in open('/proc/cpuinfo', 'r').read():
if "mem_dlpar=yes" in process.system_output("drmgr -C", ignore_status=True, shell=True):
init_mem = memory.meminfo.MemTotal.kb
self.log.info("\nDLPAR remove memory operation\n")
for _ in range(len(self.blocks_hotpluggable) / 2):
process.run(
"drmgr -c mem -d 5 -w 30 -r", shell=True, ignore_status=True, sudo=True)
if memory.meminfo.MemTotal.kb >= init_mem:
self.log.warn("dlpar mem could not complete")
self.run_stress()
init_mem = memory.meminfo.MemTotal.kb
self.log.info("\nDLPAR add memory operation\n")
for _ in range(len(self.blocks_hotpluggable) / 2):
process.run(
"drmgr -c mem -d 5 -w 30 -a", shell=True, ignore_status=True, sudo=True)
if init_mem < memory.meminfo.MemTotal.kb:
self.log.warn("dlpar mem could not complete")
else:
self.log.info('UNSUPPORTED: dlpar not configured..')
else:
self.log.info("UNSUPPORTED: Test not supported on this platform")
def hotplug_per_numa_node(self):
self.log.info("\nTEST: Numa Node memory off on\n")
with open('/sys/devices/system/node/has_normal_memory', 'r') as node_file:
nodes = node_file.read()
for node in nodes.split('-'):
node = node.strip('\n')
self.log.info("Hotplug all memory in Numa Node %s" % node)
mem_blocks = get_hotpluggable_blocks(
'/sys/devices/system/node/node%s/memory*' % node)
for block in mem_blocks:
self.log.info(
"offline memory%s in numa node%s" % (block, node))
offline(block)
self.run_stress()
self.hotplug_all(self.blocks_hotpluggable)
if __name__ == "__main__":
main()
| vrbagalkote/avocado-misc-tests-1 | memory/memhotplug.py | Python | gpl-2.0 | 9,169 |
import logging
import warnings
from dataclasses import dataclass
from hashlib import sha1
from typing import Any, AnyStr, Dict, List, Mapping, NewType, Optional, Type
import saml2.server
import six
from saml2.s_utils import UnknownPrincipal, UnknownSystemEntity, UnravelError, UnsupportedBinding
from saml2.saml import Issuer
from saml2.samlp import RequestedAuthnContext
from saml2.sigver import verify_redirect_signature
from werkzeug.exceptions import HTTPException
from eduid_webapp.idp.mischttp import HttpArgs
ResponseArgs = NewType('ResponseArgs', Dict[str, Any])
# TODO: Rename to logger
module_logger = logging.getLogger(__name__)
class SAMLParseError(Exception):
pass
class SAMLValidationError(Exception):
pass
def gen_key(something: AnyStr) -> str:
"""
Generate a unique (not strictly guaranteed) key based on `something'.
:param something: object
:return:
"""
if isinstance(something, six.binary_type):
return sha1(something).hexdigest()
return sha1(something.encode('UTF-8')).hexdigest()
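# Illustrative: gen_key('abc') and gen_key(b'abc') both return the SHA-1 hex
# digest 'a9993e364706816aba3e25717850c26c9cd0d89d', so str and bytes inputs
# map to the same key.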
@dataclass
class AuthnInfo(object):
""" Information about what AuthnContextClass etc. to put in SAML Authn responses."""
class_ref: str
authn_attributes: Dict[str, Any] # these are added to the user attributes
instant: Optional[int] = None
SamlResponse = NewType('SamlResponse', str)
class IdP_SAMLRequest(object):
def __init__(
self,
request: str,
binding: str,
idp: saml2.server.Server,
logger: Optional[logging.Logger] = None,
debug: bool = False,
):
self._request = request
self._binding = binding
self._idp = idp
self._logger = logger
self._debug = debug
if self._logger is not None:
warnings.warn('Object logger deprecated, using module_logger', DeprecationWarning)
try:
self._req_info = idp.parse_authn_request(request, binding)
except UnravelError as exc:
module_logger.info(f'Failed parsing SAML request ({len(request)} bytes)')
module_logger.debug(f'Failed parsing SAML request:\n{request}\nException {exc}')
raise SAMLParseError('Failed parsing SAML request')
if not self._req_info:
# Either there was no request, or pysaml2 found it to be unacceptable.
# For example, the IssueInstant might have been out of bounds.
module_logger.debug('No valid SAMLRequest returned by pysaml2')
raise SAMLValidationError('No valid SAMLRequest returned by pysaml2')
# Only perform expensive parse/pretty-print if debugging
if debug:
# Local import to avoid circular imports
from eduid_webapp.idp.util import maybe_xml_to_string
xmlstr = maybe_xml_to_string(self._req_info.xmlstr)
module_logger.debug(
f'Decoded SAMLRequest into AuthnRequest {repr(self._req_info.message)}:\n\n{xmlstr}\n\n'
)
@property
def binding(self) -> str:
return self._binding
def verify_signature(self, sig_alg: str, signature: str) -> bool:
info = {
'SigAlg': sig_alg,
'Signature': signature,
'SAMLRequest': self.request,
}
_certs = self._idp.metadata.certs(self.sp_entity_id, 'any', 'signing')
verified_ok = False
# Make sure at least one certificate verifies the signature
for cert in _certs:
if verify_redirect_signature(info, cert):
verified_ok = True
break
if not verified_ok:
_key = gen_key(info['SAMLRequest'])
module_logger.info('{!s}: SAML request signature verification failure'.format(_key))
return verified_ok
@property
def request(self) -> str:
"""The original SAMLRequest XML string."""
return self._request
@property
def raw_requested_authn_context(self) -> Optional[RequestedAuthnContext]:
return self._req_info.message.requested_authn_context
def get_requested_authn_context(self) -> Optional[str]:
"""
SAML requested authn context.
TODO: Don't just return the first one, but the most relevant somehow.
"""
if self.raw_requested_authn_context:
_res = self.raw_requested_authn_context.authn_context_class_ref[0].text
if not isinstance(_res, str):
raise ValueError(f'Unknown class_ref text type ({type(_res)})')
return _res
return None
@property
def raw_sp_entity_id(self) -> Issuer:
_res = self._req_info.message.issuer
if not isinstance(_res, Issuer):
raise ValueError(f'Unknown issuer type ({type(_res)})')
return _res
@property
def sp_entity_id(self) -> str:
"""The entity ID of the service provider as a string."""
_res = self.raw_sp_entity_id.text
if not isinstance(_res, str):
raise ValueError(f'Unknown SP entity id type ({type(_res)})')
return _res
@property
def force_authn(self) -> Optional[bool]:
_res = self._req_info.message.force_authn
if _res is None:
return False
if not isinstance(_res, str):
raise ValueError(f'Unknown force authn type ({type(_res)})')
return _res.lower() == 'true'
@property
def request_id(self) -> str:
_res = self._req_info.message.id
if not isinstance(_res, str):
raise ValueError(f'Unknown request id type ({type(_res)})')
return _res
@property
def sp_entity_attributes(self) -> Mapping[str, Any]:
"""Return the entity attributes for the SP that made the request from the metadata."""
res: Dict[str, Any] = {}
try:
_attrs = self._idp.metadata.entity_attributes(self.sp_entity_id)
for k, v in _attrs.items():
if not isinstance(k, str):
raise ValueError(f'Unknown entity attribute type ({type(k)})')
res[k] = v
except KeyError:
return {}
return res
@property
def sp_digest_algs(self) -> List[str]:
"""Return the best signing algorithm that both the IdP and SP supports"""
res: List[str] = []
try:
_algs = self._idp.metadata.supported_algorithms(self.sp_entity_id)['digest_methods']
for this in _algs:
if not isinstance(this, str):
raise ValueError(f'Unknown digest_methods type ({type(this)})')
res += [this]
except KeyError:
return []
return res
@property
def sp_sign_algs(self) -> List[str]:
"""Return the best signing algorithm that both the IdP and SP supports"""
res: List[str] = []
try:
_algs = self._idp.metadata.supported_algorithms(self.sp_entity_id)['signing_methods']
for this in _algs:
if not isinstance(this, str):
raise ValueError(f'Unknown signing_methods type ({type(this)})')
res += [this]
except KeyError:
return []
return res
def get_response_args(self, bad_request: Type[HTTPException], key: str) -> ResponseArgs:
try:
resp_args = self._idp.response_args(self._req_info.message)
# not sure if we need to call pick_binding again (already done in response_args()),
# but it is what we've always done
binding_out, destination = self._idp.pick_binding('assertion_consumer_service', entity_id=self.sp_entity_id)
module_logger.debug(f'Binding: {binding_out}, destination: {destination}')
resp_args['binding_out'] = binding_out
resp_args['destination'] = destination
except UnknownPrincipal as excp:
module_logger.info(f'{key}: Unknown service provider: {excp}')
raise bad_request('Don\'t know the SP that referred you here')
except UnsupportedBinding as excp:
module_logger.info(f'{key}: Unsupported SAML binding: {excp}')
raise bad_request('Don\'t know how to reply to the SP that referred you here')
except UnknownSystemEntity as exc:
# TODO: Validate refactoring didn't move this exception handling to the wrong place.
# Used to be in an exception handler in _redirect_or_post around perform_login().
module_logger.info(f'{key}: Service provider not known: {exc}')
raise bad_request('SAML_UNKNOWN_SP')
return ResponseArgs(resp_args)
def make_saml_response(
self, attributes: Mapping[str, Any], userid: str, response_authn: AuthnInfo, resp_args: ResponseArgs
) -> SamlResponse:
# Create pysaml2 dict with the authn information
authn = dict(class_ref=response_authn.class_ref, authn_instant=response_authn.instant)
saml_response = self._idp.create_authn_response(
attributes, userid=userid, authn=authn, sign_response=True, **resp_args
)
if not isinstance(saml_response, str):
raise ValueError(f'Unknown saml_response type ({type(saml_response)})')
return SamlResponse(saml_response)
def apply_binding(self, resp_args: ResponseArgs, relay_state: str, saml_response: SamlResponse) -> HttpArgs:
""" Create the Javascript self-posting form that will take the user back to the SP with a SAMLResponse.
"""
binding_out = resp_args.get('binding_out')
destination = resp_args.get('destination')
module_logger.debug(
'Applying binding_out {!r}, destination {!r}, relay_state {!r}'.format(
binding_out, destination, relay_state
)
)
_args = self._idp.apply_binding(binding_out, str(saml_response), destination, relay_state, response=True)
# _args is one of these pysaml2 dicts with HTML data, e.g.:
# {'headers': [('Content-type', 'text/html')],
# 'data': '...<body onload="document.forms[0].submit()">,
# 'url': 'https://sp.example.edu/saml2/acs/',
# 'method': 'POST'
# }
return HttpArgs.from_pysaml2_dict(_args)
| SUNET/eduid-webapp | src/eduid_webapp/idp/idp_saml.py | Python | bsd-3-clause | 10,303 |
import atmPy.general.timeseries as _timeseries
import matplotlib.pylab as plt
from matplotlib.colors import LogNorm as _LogNorm
import numpy as _np
from copy import deepcopy as _deepcopy
class Reflectivity(_timeseries.TimeSeries_2D):
def __init__(self, *args, parent= None, **kwargs):
super().__init__(*args, **kwargs)
self._parent = parent
def plot(self, snr_max=None, norm='linear', **kwargs):
if 'pc_kwargs' in kwargs:
pc_kwargs = kwargs['pc_kwargs']
else:
pc_kwargs = {}
if 'cmap' not in pc_kwargs:
pc_kwargs['cmap'] = plt.cm.gist_gray_r
if 'norm' not in pc_kwargs:
if norm == 'log':
print(norm)
pc_kwargs['norm'] = _LogNorm()
# if 'vmin' not in pc_kwargs:
# pc_kwargs['vmin'] = vmin
kwargs['pc_kwargs'] = pc_kwargs
if snr_max:
refl = self.copy()
refl.data[self._parent.signal2noise_ratio.data < snr_max] = _np.nan
out = refl.plot(norm=norm, **kwargs)
else:
out = super().plot(**kwargs)
return out
class Kazr(object):
def __init__(self):
self._reflectivity = None
self._signal2noise_ratio = None
def average_time(self, window):
"""
Averages each of the relevant properties. See timeseries.TimeSeries.average_time for details.
Parameters
----------
window: tuple
e.g. (1,'m')
Returns
-------
Kazr instances with changes applied
"""
kzr = self.copy()
kzr.reflectivity = kzr.reflectivity.average_time(window)
return kzr
def zoom_time(self, start=None, end=None, copy=True):
kazrnew = self.copy()
kazrnew.reflectivity = self.reflectivity.zoom_time(start=start, end=end, copy=copy)
kazrnew.signal2noise_ratio = self.signal2noise_ratio.zoom_time(start=start, end=end, copy=copy)
return kazrnew
def discriminate_by_signal2noise_ratio(self, minimum_snr):
"""Mask reflectivity values by signal-to-noise ratio.
The snr_max kwarg of the plot function allows similar filtering, but this
method is needed in order to average over time and still be able to
discriminate by the SNR; after averaging over time the SNR is useless.
Parameters
----------
minimum_snr: float
All values of reflectivity where the SNR is smaller than this value are set to nan.
Returns
-------
Kazr instance with changes applied
"""
kzr = self.copy()
kzr.reflectivity.data[self.signal2noise_ratio.data < minimum_snr] = _np.nan
return kzr
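# Illustrative usage (the threshold value is hypothetical):
#   kzr_clean = kzr.discriminate_by_signal2noise_ratio(10.0)
# masks every reflectivity sample whose SNR is below 10 with NaN, so a later
# average_time() only averages trustworthy samples.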
@property
def reflectivity(self):
return self._reflectivity
@reflectivity.setter
def reflectivity(self, value, **kwargs):
if type(value).__name__ == 'Reflectivity':
self._reflectivity = value
else:
self._reflectivity = Reflectivity(value, parent = self, **kwargs)
@property
def signal2noise_ratio(self):
return self._signal2noise_ratio
@signal2noise_ratio.setter
def signal2noise_ratio(self, value, **kwargs):
if type(value).__name__ == 'TimeSeries_2D':
self._signal2noise_ratio = value
else:
self._signal2noise_ratio = _timeseries.TimeSeries_2D(value, **kwargs)
def copy(self):
return _deepcopy(self)
| hagne/atm-py | atmPy/precipitation/radar.py | Python | mit | 3,475 |
# coding=utf-8
from psycopg2.extras import NamedTupleCursor, Json
from tornado.web import Application, HTTPError
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado.options import parse_command_line
import momoko
import os
from bank import SelectQuestion, get_level_one_item
from base import BaseHandler, SessionBaseHandler
from settings import MAX_ANSWER_COUNT, DSN, COOKIE_SECRET
from utils import Flow, get_quiz_stage, Que, session_reset, CheckChoice
class QuestionnaireListHandler(BaseHandler):
@gen.coroutine
def get(self):
# List of questionnaires
cursor = yield self.db.execute("SELECT id, name FROM questionnaire;")
q_list = cursor.fetchall()
self.render('index.html', q_list=q_list)
class QuestionHandler(SessionBaseHandler):
@gen.coroutine
def _check_q_exist_n_get_q_a(self, q_id):
"""
:param q_id:
:raise gen.Return: returns q_a, where q is the questionnaire and a is the answer
"""
session_key = self.session_key
cursor = yield self.db.execute(
"""
SELECT answer.id as aid, answer.score_answer, answer.old_answer,
answer.order_answer, answer.try_count,
answer.has_finished, questionnaire.id, questionnaire.type, questionnaire.second,
questionnaire.flow, questionnaire.level_one_count from answer
INNER JOIN questionnaire ON answer.questionnaire_id = questionnaire.id
WHERE answer.questionnaire_id=%s
AND answer.session_key=%s;
""", (q_id, session_key)
)
# q_a means "questionnaire and answer"
q_a = cursor.fetchone()
if not q_a:
cursor = yield self.db.execute("SELECT id, type, flow, level_one_count, second "
"FROM questionnaire WHERE id=%s;",
(q_id,))
q = cursor.fetchone()
if q:
cursor = yield self.db.execute("INSERT INTO answer (questionnaire_id, session_key, "
"score_answer, order_answer, old_answer) VALUES (%s, %s, %s, %s, %s)"
"RETURNING id AS aid, score_answer, "
"order_answer, old_answer, try_count, "
"has_finished;",
(q_id, session_key, Json({}), Json({}), Json({})))
ans = cursor.fetchone()
raise gen.Return((q, ans))
else:
raise HTTPError(404)
else:
raise gen.Return((q_a, q_a))
@gen.coroutine
def get(self, q_id):
session = self.session
q_a = yield self._check_q_exist_n_get_q_a(q_id)
q, ans = q_a
# Session keys used below
is_re_start = 'is_%s_re_start' % q_id
step = '%s_step' % q_id
stage = '%s_stage' % q_id
next_item = '%s_next_item' % q_id
step_count = '%s_step_count' % q_id
# The examinee's answering flow
flow = Flow(flow=q.flow, name=session.session_key)
# If the session has no is_X_re_start key, the examinee probably closed the browser, so restart the quiz
if not session.get(is_re_start, True):
# Check whether the first stage of the quiz has reached its end
if session[stage] == 1:
next_item_list = session[next_item]
que = Que(*next_item_list.pop(0))
else:
next_item = session[next_item]
que = Que(*next_item)
# Set the restart flag to True, so closing the browser or refreshing the page restarts the quiz
session[is_re_start] = True
session[step] += 1
session[stage] = get_quiz_stage(session[step], session[stage], flow)
else:
# Starting or restarting the quiz: reset the session to its initial state
session_reset(session, q_id)
# Increment the number of attempts for this quiz
if ans.try_count > (MAX_ANSWER_COUNT - 1):
raise HTTPError(403)
# Store the previous answers in old_answer
if ans.score_answer:
ans.old_answer.update(ans.score_answer)
ans.score_answer.clear()
ans.order_answer.clear()
# Number of items to answer in the first stage
count = flow.get_level_item_count(1)
# The first item presented to the user
que = yield get_level_one_item(ans, session, q, count, self.db)
yield self.db.execute(
"UPDATE answer SET has_finished = false, try_count = try_count + 1, score_answer=%s, order_answer=%s, "
"old_answer=%s WHERE id=%s",
(Json(ans.score_answer), Json(ans.order_answer), Json(ans.old_answer), ans.aid)
)
# Total number of items to answer
session[step_count] = flow.total_item_count
yield self.db.execute("UPDATE question SET count = count + 1 WHERE id=%s", (que.id, ))
total_step_count = session[step_count]
current_step = session[step]
current_progress = int((current_step * 1.0 / total_step_count) * 100)
second = q.second
session['q_%s_id' % q_id] = que
yield self.save()
self.render('cat.html', que=que, current_progress=current_progress,
total_step_count=total_step_count, current_step=current_step,
q_id=q_id, second=second)
@gen.coroutine
def post(self, q_id):
session = self.session
q_a = yield self._check_q_exist_n_get_q_a(q_id)
q, ans = q_a
q_type = q.type
que = Que(*session.get('q_%s_id' % q_id))
que_choice = self.get_argument('question')
check_choice = CheckChoice(que_choice, que)
if check_choice.is_valid():
# Save the answer result
value = check_choice.value
session['%s_score' % q_id].append(int(value))
ans.score_answer[str(que.id)]['score'] = value
ans.score_answer[str(que.id)]['choice'] = que_choice
# Build the redirect URL
SelectQuestionClass = getattr(SelectQuestion, q_type)
url = yield SelectQuestionClass(session=session, q=q, que_id=que.id,
ans=ans, db=self.db).get_que_then_redirect()
yield self.save()
self.redirect(url)
else:
# If the data is invalid, re-render the answering page
current_step = session['%s_step' % q_id]
total_step_count = session['%s_step_count' % q_id]
current_progress = int((current_step * 1.0 / total_step_count) * 100)
second = q.second
self.render('cat.html', que=que, current_progress=current_progress,
total_step_count=total_step_count, current_step=current_step,
q_id=q_id, second=second)
class ResultHandler(BaseHandler):
@gen.coroutine
def _check_result_exist_n_get_q_a(self, q_id):
session_key = self.get_cookie('sessionid')
if not session_key:
raise HTTPError(404)
cursor = yield self.db.execute(
"""
SELECT answer.score_answer, answer.order_answer, answer.has_finished from answer
INNER JOIN questionnaire ON answer.questionnaire_id = questionnaire.id
WHERE answer.questionnaire_id=%s
AND answer.session_key=%s;
""", (q_id, session_key)
)
# q_a means "questionnaire and answer"
q_a = cursor.fetchone()
if (not q_a) or (not q_a.has_finished):
raise HTTPError(404)
else:
raise gen.Return(q_a)
@gen.coroutine
def get(self, q_id):
q_a = yield self._check_result_exist_n_get_q_a(q_id)
self.render('result.html', q_a=q_a, q_id=q_id)
if __name__ == "__main__":
parse_command_line()
ioloop = IOLoop.instance()
application = Application([
(r"/", QuestionnaireListHandler),
(r"/cat/(\d+)", QuestionHandler),
(r"/result/(\d+)", ResultHandler)
],
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
cookie_secret=COOKIE_SECRET,
debug=True,
xsrf_cookies=True,
)
application.db = momoko.Pool(
dsn=DSN,
size=1,
ioloop=ioloop,
cursor_factory=NamedTupleCursor,
)
future = application.db.connect()
ioloop.add_future(future, lambda f: ioloop.stop())
ioloop.start()
future.result()
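# Note: the connect/add_future/start/result sequence above is the usual momoko
# startup idiom: run the IOLoop just long enough for the connection-pool
# future to resolve before the HTTP server starts listening.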
http_server = HTTPServer(application)
http_server.listen(8000, 'localhost')
ioloop.start()
| inuyasha2012/tornado-cat-example | example/main.py | Python | mit | 8,865 |
# -*- encoding: utf-8 -*-
from supriya.tools.systemtools.SupriyaObject import SupriyaObject
class Envelope(SupriyaObject):
r'''An envelope.
::
>>> from supriya.tools import *
>>> envelope = synthdeftools.Envelope()
>>> envelope
Envelope(
amplitudes=(0.0, 1.0, 0.0),
durations=(1.0, 1.0),
curves=('linear',),
offset=0.0
)
::
>>> tuple(envelope)
(0.0, 2, -99, -99, 1.0, 1.0, 1, 0.0, 0.0, 1.0, 1, 0.0)
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Main Classes'
__slots__ = (
'_amplitudes',
'_curves',
'_durations',
'_loop_node',
'_offset',
'_release_node',
)
### INITIALIZER ###
def __init__(
self,
amplitudes=(0, 1, 0),
durations=(1, 1),
curves='linear',
release_node=None,
loop_node=None,
offset=0.,
):
assert len(amplitudes)
assert len(durations) and len(durations) == (len(amplitudes) - 1)
self._amplitudes = tuple(float(x) for x in amplitudes)
self._durations = tuple(float(x) for x in durations)
if isinstance(curves, (int, float, str)):
curves = (curves,)
elif curves is None:
curves = ()
self._curves = tuple(curves)
if release_node is not None:
release_node = int(release_node)
assert 0 <= release_node < len(amplitudes)
self._release_node = release_node
if loop_node is not None:
assert self._release_node is not None
loop_node = int(loop_node)
assert 0 <= loop_node <= release_node
self._loop_node = loop_node
self._offset = float(offset)
### SPECIAL METHODS ###
def __eq__(self, expr):
from abjad.tools import systemtools
return systemtools.StorageFormatManager.compare(self, expr)
def __hash__(self):
from abjad.tools import systemtools
hash_values = systemtools.StorageFormatManager.get_hash_values(self)
return hash(hash_values)
def __iter__(self):
from supriya.tools import synthdeftools
result = []
result.append(self.amplitudes[0])
result.append(len(self.durations))
release_node = self.release_node
if release_node is None:
release_node = -99
result.append(release_node)
loop_node = self.loop_node
if loop_node is None:
loop_node = -99
result.append(loop_node)
for i in range(len(self.durations)):
result.append(self.amplitudes[i + 1])
result.append(self.durations[i])
curve = self.curves[i % len(self.curves)]
if isinstance(curve, str):
shape = synthdeftools.EnvelopeShape.from_expr(curve)
shape = int(shape)
curve = 0.
else:
shape = 5
result.append(shape)
result.append(curve)
for x in result:
yield x
### PUBLIC METHODS ###
@staticmethod
def asr(
attack_time=0.01,
release_time=1.0,
amplitude=1.0,
curve=-4.0,
):
amplitudes = (0, float(amplitude), 0)
durations = (float(attack_time), float(release_time))
curves = (float(curve),)
release_node = 1
return Envelope(
amplitudes=amplitudes,
durations=durations,
curves=curves,
release_node=release_node,
)
@staticmethod
def percussive(
attack_time=0.01,
release_time=1.0,
amplitude=1.0,
curve=-4.0,
):
r'''Make a percussion envelope.
::
>>> from supriya.tools import synthdeftools
>>> envelope = synthdeftools.Envelope.percussive()
>>> envelope
Envelope(
amplitudes=(0.0, 1.0, 0.0),
durations=(0.01, 1.0),
curves=(-4.0,),
offset=0.0
)
::
>>> tuple(envelope)
(0.0, 2, -99, -99, 1.0, 0.01, 5, -4.0, 0.0, 1.0, 5, -4.0)
'''
amplitudes = (0, float(amplitude), 0)
durations = (float(attack_time), float(release_time))
curves = (float(curve),)
return Envelope(
amplitudes=amplitudes,
durations=durations,
curves=curves,
)
@staticmethod
def triangle(
duration=1.0,
amplitude=1.0,
):
r'''Make a triangle envelope.
::
>>> from supriya.tools import synthdeftools
>>> envelope = synthdeftools.Envelope.triangle()
>>> envelope
Envelope(
amplitudes=(0.0, 1.0, 0.0),
durations=(0.5, 0.5),
curves=('linear',),
offset=0.0
)
::
>>> tuple(envelope)
(0.0, 2, -99, -99, 1.0, 0.5, 1, 0.0, 0.0, 0.5, 1, 0.0)
'''
amplitudes = (0, float(amplitude), 0)
duration = float(duration) / 2.
durations = (duration, duration)
return Envelope(
amplitudes=amplitudes,
durations=durations,
)
### PUBLIC PROPERTIES ###
@property
def amplitudes(self):
return self._amplitudes
@property
def curves(self):
return self._curves
@property
def durations(self):
return self._durations
@property
def loop_node(self):
return self._loop_node
@property
def offset(self):
return self._offset
@property
def release_node(self):
return self._release_node | andrewyoung1991/supriya | supriya/tools/synthdeftools/Envelope.py | Python | mit | 5,848 |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.tests import base
from neutron.tests.unit import test_api_v2
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1, distributed=False,
distributed_mode=None):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
router_inst = {'distributed': distributed}
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
router_info_inst.snat_iptables_manager = iptables_inst
if distributed_mode == 'dvr':
router_info_inst.dist_fip_count = 1
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1,
distributed=False, distributed_mode=None):
apply_list = self._fake_apply_list(router_count=router_count,
distributed=distributed, distributed_mode=distributed_mode)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
if distributed:
if distributed_mode == 'dvr_snat':
if_prefix = 'sg-+'
if distributed_mode == 'dvr':
if_prefix = 'rfp-+'
else:
if_prefix = 'qr-+'
distributed_mode = 'legacy'
func(distributed_mode, apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule(ingress_chain, rule1),
mock.call.add_rule(egress_chain, rule1),
mock.call.add_rule(ingress_chain, rule2),
mock.call.add_rule(egress_chain, rule2),
mock.call.add_rule('FORWARD',
'-o %s -j %s' % (if_prefix,
ipt_mgr_ichain)),
mock.call.add_rule('FORWARD',
'-i %s -j %s' % (if_prefix,
ipt_mgr_echain)),
mock.call.add_rule('FORWARD',
'-o %s -j %s-fwaas-defau' % (if_prefix,
bname)),
mock.call.add_rule('FORWARD',
'-i %s -j %s-fwaas-defau' % (if_prefix,
bname))]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall('legacy', apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [mock.call.remove_chain(
'iv%sfake-fw-uuid' % ip_version),
mock.call.remove_chain(
'ov%sfake-fw-uuid' % ip_version),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
mock.call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
if ip_version == 4:
v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
v4filter_inst.assert_has_calls(calls)
else:
v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
v6filter_inst.assert_has_calls(calls)
def test_create_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.create_firewall)
def test_create_firewall_with_rules_two_routers(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
router_count=2)
def test_update_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.update_firewall)
def test_delete_firewall(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.delete_firewall('legacy', apply_list, firewall)
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
calls = [mock.call.remove_chain(ingress_chain),
mock.call.remove_chain(egress_chain),
mock.call.remove_chain('fwaas-default-policy')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_admin_down(self):
apply_list = self._fake_apply_list()
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall_with_admin_down(rule_list)
self.firewall.create_firewall('legacy', apply_list, firewall)
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_update_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_create_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr')
def test_update_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr')
| citrix-openstack-build/neutron-fwaas | neutron_fwaas/tests.skip/unit/services/firewall/drivers/linux/test_iptables_fwaas.py | Python | apache-2.0 | 11,580 |
#!/usr/bin/python3
def odds_minus_evens(numbers):
''' Returns the sum of odd numbers in the list minus the sum of evens '''
total = 0
for x in numbers:
if x % 2 == 0:
total -= x
else:
total += x
return total
print(odds_minus_evens(range(10)))
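# Worked check: for range(10), the odds sum to 1+3+5+7+9 = 25 and the evens to
# 0+2+4+6+8 = 20, so the call above prints 5.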
| nonZero/demos-python | src/exercises/basic/odds_minus_evens/solution5.py | Python | gpl-3.0 | 281 |
import json
import logging
import random
import re
import requests
import sys
import time
import traceback
from websocket import WebSocketConnectionClosedException
from markdownify import MarkdownConverter
from will import settings
from .base import IOBackend
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import SleepMixin, StorageMixin
from multiprocessing import Process
from will.abstractions import Event, Message, Person, Channel
from slackclient import SlackClient
from slackclient.server import SlackConnectionError
SLACK_SEND_URL = "https://slack.com/api/chat.postMessage"
SLACK_SET_TOPIC_URL = "https://slack.com/api/channels.setTopic"
SLACK_PRIVATE_SET_TOPIC_URL = "https://slack.com/api/groups.setTopic"
class SlackMarkdownConverter(MarkdownConverter):
def convert_strong(self, el, text):
return '*%s*' % text if text else ''
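# Illustrative: the stock MarkdownConverter would render <strong>done</strong>
# as '**done**'; this override emits '*done*', which is what Slack's mrkdwn
# expects for bold text.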
class SlackBackend(IOBackend, SleepMixin, StorageMixin):
friendly_name = "Slack"
internal_name = "will.backends.io_adapters.slack"
required_settings = [
{
"name": "SLACK_API_TOKEN",
"obtain_at": """1. Go to https://api.slack.com/custom-integrations/legacy-tokens and sign in as yourself (or a user for Will).
2. Find the workspace you want to use, and click "Create token."
3. Set this token as SLACK_API_TOKEN."""
}
]
def get_channel_from_name(self, name):
for k, c in self.channels.items():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c
def normalize_incoming_event(self, event):
if (
"type" in event
and event["type"] == "message"
and ("subtype" not in event or event["subtype"] != "message_changed")
# Ignore thread summary events (for now.)
# TODO: We should stack these into the history.
and ("subtype" not in event or ("message" in event and "thread_ts" not in event["message"]))
):
# print("slack: normalize_incoming_event - %s" % event)
# Sample of group message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495661121.838366', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'C5JDAR2S3'}
# Sample of 1-1 message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495662397.335424', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'D5HGP0YE7'}
# Threaded message
# {u'event_ts': u'1507601477.000073', u'ts': u'1507601477.000073',
# u'subtype': u'message_replied', u'message':
# {u'thread_ts': u'1507414046.000010', u'text': u'hello!',
# u'ts': u'1507414046.000010', u'unread_count': 2,
# u'reply_count': 2, u'user': u'U5GUL9D9N', u'replies':
# [{u'user': u'U5ACF70RH', u'ts': u'1507601449.000007'}, {
# u'user': u'U5ACF70RH', u'ts': u'1507601477.000063'}],
# u'type': u'message', u'bot_id': u'B5HL9ABFE'},
# u'type': u'message', u'hidden': True, u'channel': u'D5HGP0YE7'}
sender = self.people[event["user"]]
channel = clean_for_pickling(self.channels[event["channel"]])
# print "channel: %s" % channel
interpolated_handle = "<@%s>" % self.me.id
real_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
is_private_chat = False
thread = None
if "thread_ts" in event:
thread = event["thread_ts"]
# If the parent thread is a 1-1 between Will and I, also treat that as direct.
# Since members[] still comes in on the thread event, we can trust this, even if we're
# in a thread.
if channel.id == channel.name:
is_private_chat = True
# <@U5GUL9D9N> hi
# TODO: if there's a thread with just will and I on it, treat that as direct.
is_direct = False
if is_private_chat or event["text"].startswith(interpolated_handle) or event["text"].startswith(real_handle):
is_direct = True
if event["text"].startswith(interpolated_handle):
event["text"] = event["text"][len(interpolated_handle):].strip()
if event["text"].startswith(real_handle):
event["text"] = event["text"][len(real_handle):].strip()
if interpolated_handle in event["text"] or real_handle in event["text"]:
will_is_mentioned = True
if event["user"] == self.me.id:
will_said_it = True
m = Message(
content=event["text"],
type=event["type"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
thread=thread,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
return m
else:
# An event type the slack backend has no idea how to handle.
pass
def set_topic(self, event):
headers = {'Accept': 'text/plain'}
data = self.set_data_channel_and_thread(event)
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
"topic": event.content,
})
if data["channel"].startswith("G"):
url = SLACK_PRIVATE_SET_TOPIC_URL
else:
url = SLACK_SET_TOPIC_URL
r = requests.post(
url,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def handle_outgoing_event(self, event):
if event.type in ["say", "reply"]:
if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
event.content = SlackMarkdownConverter().convert(event.content)
event.content = event.content.replace("&", "&")
event.content = event.content.replace(r"\_", "_")
kwargs = {}
if "kwargs" in event:
kwargs.update(**event.kwargs)
if hasattr(event, "source_message") and event.source_message and "channel" not in kwargs:
self.send_message(event)
else:
# Came from webhook/etc
# TODO: finish this.
target_channel = kwargs.get("room", kwargs.get("channel", None))
if target_channel:
event.channel = self.get_channel_from_name(target_channel)
if event.channel:
self.send_message(event)
else:
logging.error(
"I was asked to post to the slack %s channel, but it doesn't exist.",
target_channel
)
if self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
event.content = event.content + " (for #%s)" % target_channel
self.send_message(event)
elif self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
self.send_message(event)
else:
logging.critical(
"I was asked to post to a slack default channel, but I'm nowhere."
"Please invite me somewhere with '/invite @%s'", self.me.handle
)
if event.type in ["topic_change", ]:
self.set_topic(event)
elif (
event.type == "message.no_response"
and event.data.is_direct
and event.data.will_said_it is False
):
event.content = random.choice(UNSURE_REPLIES)
self.send_message(event)
def handle_request(self, r, data):
resp_json = r.json()
if not resp_json["ok"]:
if resp_json["error"] == "not_in_channel":
channel = self.get_channel_from_name(data["channel"])
if not hasattr(self, "me") or not hasattr(self.me, "handle"):
self.people
logging.critical(
"I was asked to post to the slack %s channel, but I haven't been invited. "
"Please invite me with '/invite @%s'" % (channel.name, self.me.handle)
)
else:
logging.error("Error sending to slack: %s" % resp_json["error"])
logging.error(resp_json)
assert resp_json["ok"]
    def set_data_channel_and_thread(self, event, data=None):
        # Use None rather than a mutable default so calls don't share state.
        if data is None:
            data = {}
        if "channel" in event:
# We're coming off an explicit set.
channel_id = event.channel.id
else:
if "source_message" in event:
# Mentions that come back via self.say()
if hasattr(event.source_message, "data"):
channel_id = event.source_message.data.channel.id
if hasattr(event.source_message.data, "thread"):
data.update({
"thread_ts": event.source_message.data.thread
})
else:
# Mentions that come back via self.say() with a specific room (I think)
channel_id = event.source_message.channel.id
if hasattr(event.source_message, "thread"):
data.update({
"thread_ts": event.source_message.thread
})
else:
# Mentions that come back via self.reply()
if hasattr(event.data, "original_incoming_event"):
if hasattr(event.data.original_incoming_event.channel, "id"):
channel_id = event.data.original_incoming_event.channel.id
else:
channel_id = event.data.original_incoming_event.channel
else:
if hasattr(event.data["original_incoming_event"].data.channel, "id"):
channel_id = event.data["original_incoming_event"].data.channel.id
else:
channel_id = event.data["original_incoming_event"].data.channel
try:
# If we're starting a thread
if "kwargs" in event and "start_thread" in event.kwargs and event.kwargs["start_thread"] and ("thread_ts" not in data or not data["thread_ts"]):
if hasattr(event.source_message, "original_incoming_event"):
data.update({
"thread_ts": event.source_message.original_incoming_event["ts"]
})
elif (
hasattr(event.source_message, "data")
and hasattr(event.source_message.data, "original_incoming_event")
and "ts" in event.source_message.data.original_incoming_event
):
logging.error(
"Hm. I was told to start a new thread, but while using .say(), instead of .reply().\n"
"This doesn't really make sense, but I'm going to make the best of it by pretending you "
"used .say() and threading off of your message.\n"
"Please update your plugin to use .reply() when you have a second!"
)
data.update({
"thread_ts": event.source_message.data.original_incoming_event["ts"]
})
else:
if hasattr(event.data.original_incoming_event, "thread_ts"):
data.update({
"thread_ts": event.data.original_incoming_event.thread_ts
})
elif "thread" in event.data.original_incoming_event.data:
data.update({
"thread_ts": event.data.original_incoming_event.data.thread
})
        except Exception:
            logging.info(traceback.format_exc().split(" ")[-1])
data.update({
"channel": channel_id,
})
return data
def send_message(self, event):
data = {}
if hasattr(event, "kwargs"):
data.update(event.kwargs)
# Add slack-specific functionality
if "color" in event.kwargs:
data.update({
"attachments": json.dumps([
{
"fallback": event.content,
"color": self._map_color(event.kwargs["color"]),
"text": event.content,
}
]),
})
elif "attachments" in event.kwargs:
data.update({
"text": event.content,
"attachments": json.dumps(event.kwargs["attachments"])
})
else:
data.update({
"text": event.content,
})
else:
data.update({
"text": event.content,
})
data = self.set_data_channel_and_thread(event, data=data)
# Auto-link mention names
if "text" in data:
if data["text"].find("<@") != -1:
data["text"] = data["text"].replace("<@", "<@")
data["text"] = data["text"].replace(">", ">")
elif "attachments" in data and "text" in data["attachments"][0]:
if data["attachments"][0]["text"].find("<@") != -1:
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace("<@", "<@")
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(">", ">")
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
})
if hasattr(event, "kwargs") and "html" in event.kwargs and event.kwargs["html"]:
data.update({
"parse": "full",
})
headers = {'Accept': 'text/plain'}
r = requests.post(
SLACK_SEND_URL,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def _map_color(self, color):
# Turn colors into hex values, handling old slack colors, etc
if color == "red":
return "danger"
elif color == "yellow":
return "warning"
elif color == "green":
return "good"
return color
def join_channel(self, channel_id):
return self.client.api_call(
"channels.join",
channel=channel_id,
)
@property
def people(self):
if not hasattr(self, "_people") or self._people is {}:
self._update_people()
return self._people
@property
def default_channel(self):
if not hasattr(self, "_default_channel") or not self._default_channel:
self._decide_default_channel()
return self._default_channel
@property
def channels(self):
if not hasattr(self, "_channels") or self._channels is {}:
self._update_channels()
return self._channels
@property
def client(self):
if not hasattr(self, "_client"):
self._client = SlackClient(settings.SLACK_API_TOKEN)
return self._client
def _decide_default_channel(self):
self._default_channel = None
if not hasattr(self, "complained_about_default"):
self.complained_about_default = False
self.complained_uninvited = False
# Set self.me
self.people
if hasattr(settings, "SLACK_DEFAULT_CHANNEL"):
channel = self.get_channel_from_name(settings.SLACK_DEFAULT_CHANNEL)
if channel:
if self.me.id in channel.members:
self._default_channel = channel.id
return
elif not self.complained_about_default:
self.complained_about_default = True
logging.error("The defined default channel(%s) does not exist!",
settings.SLACK_DEFAULT_CHANNEL)
for c in self.channels.values():
if c.name != c.id and self.me.id in c.members:
self._default_channel = c.id
if not self._default_channel and not self.complained_uninvited:
self.complained_uninvited = True
logging.critical("No channels with me invited! No messages will be sent!")
def _update_channels(self):
channels = {}
for c in self.client.server.channels:
members = {}
for m in c.members:
members[m] = self.people[m]
channels[c.id] = Channel(
id=c.id,
name=c.name,
source=clean_for_pickling(c),
members=members
)
if len(channels.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so fall back to the cached copy if one exists.
if self.load("slack_channel_cache", None):
self._channels = self.load("slack_channel_cache", None)
else:
self._channels = channels
self.save("slack_channel_cache", channels)
def _update_people(self):
people = {}
self.handle = self.client.server.username
for k, v in self.client.server.users.items():
user_timezone = None
if v.tz:
user_timezone = v.tz
people[k] = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if v.name == self.handle:
self.me = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if user_timezone and user_timezone != 'unknown':
people[k].timezone = user_timezone
if v.name == self.handle:
self.me.timezone = user_timezone
if len(people.keys()) == 0:
            # Server isn't set up yet, and we're likely in a processing thread,
            # so fall back to the cached copy if one exists.
if self.load("slack_people_cache", None):
self._people = self.load("slack_people_cache", None)
if not hasattr(self, "me") or not self.me:
self.me = self.load("slack_me_cache", None)
if not hasattr(self, "handle") or not self.handle:
self.handle = self.load("slack_handle_cache", None)
else:
self._people = people
self.save("slack_people_cache", people)
self.save("slack_me_cache", self.me)
self.save("slack_handle_cache", self.handle)
def _update_backend_metadata(self):
self._update_people()
self._update_channels()
def _watch_slack_rtm(self):
while True:
try:
if self.client.rtm_connect(auto_reconnect=True):
self._update_backend_metadata()
num_polls_between_updates = 30 / settings.EVENT_LOOP_INTERVAL # Every 30 seconds
current_poll_count = 0
while True:
events = self.client.rtm_read()
if len(events) > 0:
# TODO: only handle events that are new.
# print(len(events))
for e in events:
self.handle_incoming_event(e)
                        # Update channels/people/me/etc every 30s or so.
current_poll_count += 1
if current_poll_count > num_polls_between_updates:
self._update_backend_metadata()
current_poll_count = 0
self.sleep_for_event_loop()
except (WebSocketConnectionClosedException, SlackConnectionError):
                logging.error('Encountered connection error; attempting reconnect in 2 seconds')
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
break
            except Exception:
logging.critical("Error in watching slack RTM: \n%s" % traceback.format_exc())
break
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
# Property, auto-inits.
self.client
self.rtm_thread = Process(target=self._watch_slack_rtm)
self.rtm_thread.start()
def terminate(self):
if hasattr(self, "rtm_thread"):
self.rtm_thread.terminate()
while self.rtm_thread.is_alive():
time.sleep(0.2)
| wontonst/will | will/backends/io_adapters/slack.py | Python | mit | 22,435 |
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds sitelinks to a campaign.
To create a campaign, run add_campaigns.py.
DEPRECATION WARNING!
THIS USAGE IS DEPRECATED AND WILL BE REMOVED IN AN UPCOMING API VERSION.
All extensions should migrate to Assets. See add_sitelinks_using_assets.py
"""
import argparse
import datetime
import sys
from collections import namedtuple
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
_DateRange = namedtuple("_DateRange", ["start_datetime", "end_datetime"])
_date_format = "%Y-%m-%d %H:%M:%S"
# [START add_sitelinks_1]
def main(client, customer_id, campaign_id):
"""The main method that creates all necessary entities for the example."""
# Create an extension setting.
campaign_service = client.get_service("CampaignService")
campaign_ext_setting_service = client.get_service(
"CampaignExtensionSettingService"
)
campaign_resource_name = campaign_service.campaign_path(
customer_id, campaign_id
)
feed_item_resource_names = _create_extension_feed_items(
client, customer_id, campaign_resource_name
)
campaign_ext_setting_operation = client.get_type(
"CampaignExtensionSettingOperation"
)
extension_type_enum = client.enums.ExtensionTypeEnum
campaign_ext_setting = campaign_ext_setting_operation.create
campaign_ext_setting.campaign = campaign_resource_name
campaign_ext_setting.extension_type = extension_type_enum.SITELINK
campaign_ext_setting.extension_feed_items.extend(feed_item_resource_names)
# Add campaign extension setting with site link feed items.
response = campaign_ext_setting_service.mutate_campaign_extension_settings(
customer_id=customer_id, operations=[campaign_ext_setting_operation]
)
print(
"Created CampaignExtensionSetting: "
f"'{response.results[0].resource_name}'."
)
# [END add_sitelinks_1]
# [START add_sitelinks]
def _create_extension_feed_items(client, customer_id, campaign_resource_name):
"""Helper method that creates extension feed items.
Args:
client: a GoogleAdsClient instance.
customer_id: a str Google Ads customer ID, that the extension feed items
will be created for.
campaign_resource_name: a str resource name for the campaign that will
be tracked by the created extension feed items.
Returns:
A list containing resource names for the created extension feed items.
"""
extension_feed_item_service = client.get_service("ExtensionFeedItemService")
geo_target_constant_service = client.get_service("GeoTargetConstantService")
extension_type_enum = client.enums.ExtensionTypeEnum
feed_item_target_device_enum = client.enums.FeedItemTargetDeviceEnum
day_of_week_enum = client.enums.DayOfWeekEnum
minute_of_hour_enum = client.enums.MinuteOfHourEnum
extension_feed_item_operation1 = client.get_type(
"ExtensionFeedItemOperation"
)
extension_feed_item1 = extension_feed_item_operation1.create
extension_feed_item1.extension_type = extension_type_enum.SITELINK
extension_feed_item1.sitelink_feed_item.link_text = "Store Hours"
extension_feed_item1.targeted_campaign = campaign_resource_name
extension_feed_item1.sitelink_feed_item.final_urls.append(
"http://www.example.com/storehours"
)
extension_feed_item_operation2 = client.get_type(
"ExtensionFeedItemOperation"
)
date_range = _get_thanksgiving_string_date_range()
extension_feed_item2 = extension_feed_item_operation2.create
extension_feed_item2.extension_type = extension_type_enum.SITELINK
extension_feed_item2.sitelink_feed_item.link_text = "Thanksgiving Specials"
extension_feed_item2.targeted_campaign = campaign_resource_name
extension_feed_item2.start_date_time = date_range.start_datetime
extension_feed_item2.end_date_time = date_range.end_datetime
# Targets this sitelink for the United States only.
# A list of country codes can be referenced here:
# https://developers.google.com/google-ads/api/reference/data/geotargets
united_states = geo_target_constant_service.geo_target_constant_path(2048)
extension_feed_item2.targeted_geo_target_constant = united_states
extension_feed_item2.sitelink_feed_item.final_urls.append(
"http://www.example.com/thanksgiving"
)
extension_feed_item_operation3 = client.get_type(
"ExtensionFeedItemOperation"
)
extension_feed_item3 = extension_feed_item_operation3.create
extension_feed_item3.extension_type = extension_type_enum.SITELINK
extension_feed_item3.sitelink_feed_item.link_text = "Wifi available"
extension_feed_item3.targeted_campaign = campaign_resource_name
extension_feed_item3.device = feed_item_target_device_enum.MOBILE
extension_feed_item3.sitelink_feed_item.final_urls.append(
"http://www.example.com/mobile/wifi"
)
extension_feed_item_operation4 = client.get_type(
"ExtensionFeedItemOperation"
)
extension_feed_item4 = extension_feed_item_operation4.create
extension_feed_item4.extension_type = extension_type_enum.SITELINK
extension_feed_item4.sitelink_feed_item.link_text = "Happy hours"
extension_feed_item4.targeted_campaign = campaign_resource_name
extension_feed_item4.device = feed_item_target_device_enum.MOBILE
extension_feed_item4.sitelink_feed_item.final_urls.append(
"http://www.example.com/happyhours"
)
for day_of_week in [
day_of_week_enum.MONDAY,
day_of_week_enum.TUESDAY,
day_of_week_enum.WEDNESDAY,
day_of_week_enum.THURSDAY,
day_of_week_enum.FRIDAY,
]:
ad_schedule = client.get_type("AdScheduleInfo")
_populate_ad_schedule(
ad_schedule,
day_of_week,
18,
minute_of_hour_enum.ZERO,
21,
minute_of_hour_enum.ZERO,
)
extension_feed_item4.ad_schedules.append(ad_schedule)
# Add extension feed items
feed_response = extension_feed_item_service.mutate_extension_feed_items(
customer_id=customer_id,
operations=[
extension_feed_item_operation1,
extension_feed_item_operation2,
extension_feed_item_operation3,
extension_feed_item_operation4,
],
)
print("Created ExtensionFeedItems:")
for feed_item in feed_response.results:
print(f"\tResource name: {feed_item.resource_name}")
return [result.resource_name for result in feed_response.results]
# [END add_sitelinks]
def _get_thanksgiving_string_date_range():
"""Retrieves a _DateRange with formatted datetime start/end strings."""
now = datetime.datetime.now()
start_dt = datetime.datetime(now.year, 11, 20, 0, 0, 0)
if start_dt < now:
# Move start_dt to next year if the current date is past November 20th.
start_dt = start_dt + datetime.timedelta(days=365)
end_dt = datetime.datetime(start_dt.year, 11, 27, 23, 59, 59)
return _DateRange(
start_dt.strftime(_date_format), end_dt.strftime(_date_format)
)
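# Illustrative: a call made in October 2021 would return the pair
# ("2021-11-20 00:00:00", "2021-11-27 23:59:59"); a call made after
# November 20th rolls the window forward by 365 days into the next year.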
def _populate_ad_schedule(
ad_schedule, day_of_week, start_hour, start_minute, end_hour, end_minute
):
"""Helper method to populate a given AdScheduleInfo instance."""
ad_schedule.day_of_week = day_of_week
ad_schedule.start_hour = start_hour
ad_schedule.start_minute = start_minute
ad_schedule.end_hour = end_hour
ad_schedule.end_minute = end_minute
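# Illustrative: the loop above calls this helper as
# _populate_ad_schedule(ad_schedule, day_of_week_enum.MONDAY, 18,
#                       minute_of_hour_enum.ZERO, 21, minute_of_hour_enum.ZERO)
# which targets the 18:00-21:00 window on Mondays.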
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v10")
parser = argparse.ArgumentParser(
description="Adds sitelinks to the specified campaign."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID",
)
parser.add_argument(
"-i",
"--campaign_id",
type=str,
required=True,
help="The campaign ID sitelinks will be added to.",
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id, args.campaign_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print("\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| googleads/google-ads-python | examples/extensions/add_sitelinks.py | Python | apache-2.0 | 9,516 |
try:
frozenset
except NameError:
#Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
class MethodDispatcher(dict):
"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name,value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
def __getitem__(self, key):
return dict.get(self, key, self.default)
#Pure python implementation of deque taken from the ASPN Python Cookbook
#Original code by Raymond Hettinger
class deque(object):
def __init__(self, iterable=(), maxsize=-1):
if not hasattr(self, 'data'):
self.left = self.right = 0
self.data = {}
self.maxsize = maxsize
self.extend(iterable)
def append(self, x):
self.data[self.right] = x
self.right += 1
if self.maxsize != -1 and len(self) > self.maxsize:
self.popleft()
def appendleft(self, x):
self.left -= 1
self.data[self.left] = x
if self.maxsize != -1 and len(self) > self.maxsize:
self.pop()
def pop(self):
if self.left == self.right:
raise IndexError('cannot pop from empty deque')
self.right -= 1
elem = self.data[self.right]
del self.data[self.right]
return elem
def popleft(self):
if self.left == self.right:
raise IndexError('cannot pop from empty deque')
elem = self.data[self.left]
del self.data[self.left]
self.left += 1
return elem
def clear(self):
self.data.clear()
self.left = self.right = 0
def extend(self, iterable):
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
for elem in iterable:
self.appendleft(elem)
def rotate(self, n=1):
if self:
n %= len(self)
for i in xrange(n):
self.appendleft(self.pop())
def __getitem__(self, i):
if i < 0:
i += len(self)
try:
return self.data[i + self.left]
except KeyError:
raise IndexError
def __setitem__(self, i, value):
if i < 0:
i += len(self)
try:
self.data[i + self.left] = value
except KeyError:
raise IndexError
def __delitem__(self, i):
size = len(self)
if not (-size <= i < size):
raise IndexError
data = self.data
if i < 0:
i += size
for j in xrange(self.left+i, self.right-1):
data[j] = data[j+1]
self.pop()
def __len__(self):
return self.right - self.left
def __cmp__(self, other):
if type(self) != type(other):
return cmp(type(self), type(other))
return cmp(list(self), list(other))
def __repr__(self, _track=[]):
if id(self) in _track:
return '...'
_track.append(id(self))
r = 'deque(%r)' % (list(self),)
_track.remove(id(self))
return r
def __getstate__(self):
return (tuple(self),)
def __setstate__(self, s):
self.__init__(s[0])
def __hash__(self):
raise TypeError
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo={}):
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
result.__init__(deepcopy(tuple(self), memo))
return result
#Some utility functions to deal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
    # On a narrow (UCS2) build a single non-BMP character is stored as a
    # surrogate pair, so the length of this one-character literal is 2.
    if len(u"\U0010FFFF") == 2:
        return "UCS2"
    else:
        return "UCS4"
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
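# Illustrative: on a narrow build U+1F600 is stored as the surrogate pair
# (u'\ud83d', u'\ude00'), and surrogatePairToCodepoint gives
# 0x10000 + (0xD83D - 0xD800) * 0x400 + (0xDE00 - 0xDC00) = 0x1F600.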
| wangtaoking1/found_website | 项目代码/html5lib/utils.py | Python | gpl-2.0 | 4,959 |
# ------------------------------------------------------------------------------
#
# Copyright 2011, 2012, 2013 Brent L. Brock and the Craighead Institute
#
# This file is part of Wild Planner.
#
# Wild Planner is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wild Planner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Wild Planner in the file named LICENSE.TXT. If not, see <http://www.gnu.org/licenses/>.
#
# ------------------------------------------------------------------------------
# connected_export.py
# Created on: 2012-11-07 15:52:12.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Set the necessary product code
# import arcinfo
# Import arcpy module
import arcpy
# Local variables:
Bobcat_Hab_merge = "Bobcat_Hab_merge"
bobcat_scen1_lnk_100_img = "bobcat_scen1_lnk_100.img"
RasterT_badg_li1_shp = "C:\\WORKSPACE\\RasterT_bobcat_2.shp"
Output_Feature_Class = "C:\\WORKSPACE\\RasterT_bobcat_2_Erase.shp"
Output_Feature_Class__3_ = "C:\\WORKSPACE\\RasterT_bobcat_2_Erase_Multi.shp"
Output_Layer = "RasterT_bobcat_2_Erase_Multi"
Output_Layer__2_ = "Bobcat_Hab_merge_Layer"
Bobcat_Hab_merge__2_ = "Bobcat_Hab_merge_Layer"
RasterT_badg_li1_Erase_Layer = "RasterT_bobcat_2_Erase_Multi"
Bobcat_Hab_merge__3_ = "Bobcat_Hab_merge_Layer"
RasterT_badg_li1_Erase_Layer__2_ = "RasterT_bobcat_2_Erase_Multi"
RasterT_bobcat_2_Erase_Multi = "RasterT_bobcat_2_Erase_Multi"
Output_Feature_Class__2_ = "C:\\WORKSPACE\\RasterT_bobcat_2_Erase_Multi1.shp"
# Process: Raster to Polygon
arcpy.RasterToPolygon_conversion(bobcat_scen1_lnk_100_img, RasterT_badg_li1_shp, "NO_SIMPLIFY", "VALUE")
# Process: Erase
arcpy.Erase_analysis(RasterT_badg_li1_shp, Bobcat_Hab_merge, Output_Feature_Class, "")
# Process: Multipart To Singlepart
arcpy.MultipartToSinglepart_management(Output_Feature_Class, Output_Feature_Class__3_)
# Process: Make Feature Layer
arcpy.MakeFeatureLayer_management(Output_Feature_Class__3_, Output_Layer, "", "", "Shape Shape VISIBLE NONE;FID FID VISIBLE NONE;ID ID VISIBLE NONE;GRIDCODE GRIDCODE VISIBLE NONE;ORIG_FID ORIG_FID VISIBLE NONE")
# Process: Make Feature Layer (2)
arcpy.MakeFeatureLayer_management(Bobcat_Hab_merge, Output_Layer__2_, "", "", "FID FID VISIBLE NONE;Shape Shape VISIBLE NONE;ID ID VISIBLE NONE;GRIDCODE GRIDCODE VISIBLE NONE;area area VISIBLE NONE;sizeclass sizeclass VISIBLE NONE;sqkm sqkm VISIBLE NONE")
# Process: Select Layer By Attribute
arcpy.SelectLayerByAttribute_management(Output_Layer__2_, "NEW_SELECTION", "\"GRIDCODE\" = 1")
# Process: Select Layer By Location
arcpy.SelectLayerByLocation_management(Output_Layer, "BOUNDARY_TOUCHES", Bobcat_Hab_merge__2_, "", "NEW_SELECTION")
# Process: Select Layer By Attribute (2)
arcpy.SelectLayerByAttribute_management(Output_Layer__2_, "NEW_SELECTION", "\"GRIDCODE\" = 2")
# Process: Select Layer By Location (2)
arcpy.SelectLayerByLocation_management(RasterT_badg_li1_Erase_Layer, "BOUNDARY_TOUCHES", Bobcat_Hab_merge__3_, "", "SUBSET_SELECTION")
# Process: Select Layer By Attribute (3)
arcpy.SelectLayerByAttribute_management(RasterT_badg_li1_Erase_Layer__2_, "SUBSET_SELECTION", "GRIDCODE = 1")
# Process: Copy Features
arcpy.CopyFeatures_management(RasterT_bobcat_2_Erase_Multi, Output_Feature_Class__2_, "", "0", "0", "0")
| blbrock/WildPlanner-10-Archive- | Scripts/connected_export.py | Python | lgpl-3.0 | 3,799 |
class Solution(object):
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
length = len(nums)
nums.sort()
loss = target - (nums[0] + nums[1] + nums[2])
for i in range(length-2):
left = i + 1
right = length - 1
while left < right:
new_loss = target - nums[i] - nums[left] - nums[right]
if new_loss > 0:
left += 1
elif new_loss < 0:
right -= 1
else:
return target
if abs(new_loss) < abs(loss):
loss = new_loss
return target - loss
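# Illustrative check: Solution().threeSumClosest([-1, 2, 1, -4], 1) returns 2,
# since -1 + 2 + 1 = 2 is the closest reachable sum to the target of 1.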
| xiezhq-hermann/LeetCode-in-Python | src/three_sum_closest_16.py | Python | mit | 760 |
import tensorflow as tf
from numpy import sqrt
def fully_connected_layer(inputs, input_dim, output_dim, nonlinearity=tf.nn.relu):
weights = tf.Variable(initial_value=tf.truncated_normal(shape=[input_dim, output_dim],
stddev=sqrt(2) / (input_dim +
output_dim) ** 0.5),
name='weights')
biases = tf.Variable(initial_value=tf.zeros([output_dim]),
name='biases')
outputs = nonlinearity(tf.matmul(inputs, weights) + biases)
return outputs
def dropout_layer(inputs, keep_prob):
outputs = tf.nn.dropout(inputs, keep_prob=keep_prob, name='dropout')
return outputs
def convolutional_layer(input_tensor, kernel_height, kernel_width, num_channels, num_filters,
padding='SAME'):
# For setting weight initial variance.
shape = input_tensor.get_shape().as_list()
fan_in = shape[1] * shape[2] * shape[3]
# Input has size batch x rows x cols x in_channels
# The kernel of size rows x cols x in_channels x out_channels
kernel = tf.Variable(initial_value=tf.truncated_normal(shape=[kernel_height,
kernel_width,
num_channels,
num_filters],
stddev=1 / fan_in ** 0.5),
name='kernel')
# Convolution output has variable size depending on padding.
# With zero_padding it will have the same shape as the input tensor.
conv = tf.nn.conv2d(input=input_tensor,
filter=kernel,
strides=[1, 1, 1, 1],
padding=padding)
biases = tf.Variable(initial_value=tf.zeros([num_filters]),
name='biases')
outputs = tf.nn.bias_add(conv, biases)
return outputs
def max_pooling_layer(input_tensor):
    """
    We only really need to define a 2x2 pooling filter, as larger down-sampling
    can be done by just stringing these together.
    Note we only pool channel by channel, i.e. ksize[-1] == 1.
    """
    # Stride 2 so each pooling layer halves the spatial dimensions, matching
    # the 2x2 down-sampling the docstring describes.
    pool = tf.nn.max_pool(input_tensor,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='VALID',
                          name='pooling')
    return pool
def batch_normalization_layer(input_tensor, axes=(0, 1, 2)):
shape = input_tensor.get_shape().as_list()
depth = shape[-1]
mean, variance = tf.nn.moments(input_tensor,
axes=axes,
shift=None,
keep_dims=False,
name='moments')
offset = tf.Variable(initial_value=tf.zeros([depth]),
name='offset')
scale = tf.Variable(initial_value=tf.ones([depth]),
name='scale')
outputs = tf.nn.batch_normalization(input_tensor,
mean=mean,
variance=variance,
offset=offset,
scale=scale,
variance_epsilon=1e-5,
name='batch_norm')
return outputs
def fully_connected_layer_bn(inputs, input_dim, output_dim, nonlinearity=tf.nn.relu):
weights = tf.Variable(initial_value=tf.truncated_normal(shape=[input_dim, output_dim],
stddev=sqrt(2) / (input_dim +
output_dim) ** 0.5),
name='weights')
biases = tf.Variable(initial_value=tf.zeros([output_dim]),
name='biases')
intermediate = tf.matmul(inputs, weights) + biases
normed = batch_normalization_layer(intermediate, axes=[0])
outputs = nonlinearity(normed)
return outputs
def convolution_1d(input_tensor, kernel_shape, padding='SAME', bias_flg=False):
# For setting weight initial variance.
input_shape = input_tensor.get_shape().as_list()
in_channels = input_shape[2]
stddev = sqrt(2 / (kernel_shape[1] ** 2 * in_channels))
# Input has size batch x samples x in_channels
# The kernel of size width x in_channels x out_channels
kernel = tf.Variable(initial_value=tf.truncated_normal(shape=kernel_shape,
stddev=stddev),
name='kernel')
# Convolution output has variable size depending on padding.
# With zero_padding it will have the same shape as the input tensor.
conv = tf.nn.conv1d(value=input_tensor,
filters=kernel,
stride=1,
padding=padding)
    if bias_flg:
        # One bias per output channel, i.e. the last entry of kernel_shape.
        biases = tf.Variable(initial_value=tf.zeros([kernel_shape[-1]]),
                             name='biases')
        outputs = tf.nn.bias_add(conv, biases)
    else:
        outputs = conv
    return outputs
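# Minimal usage sketch (assumes TF1 graph mode; the placeholder shape and
# layer sizes are illustrative, not taken from this project):
#     x = tf.placeholder(tf.float32, [None, 784])
#     h = fully_connected_layer(x, 784, 128)
#     h = dropout_layer(h, keep_prob=0.5)
#     logits = fully_connected_layer(h, 128, 10, nonlinearity=tf.identity)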
| PaddyT/waveform-asr | waveasr/models/layers.py | Python | mit | 5,397 |
'''
Author: Gurkirt Singh
Start date: 15th May 2016
purpose: of this file is to read frame-level predictions and process them to produce a label per video
'''
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pickle
import os,h5py
import time,json
#import pylab as plt
#######baseDir = "/mnt/sun-alpha/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
########imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
######## imgDir = "/mnt/DATADISK2/ss-workspace/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
def power_normalize(xx, alpha=0.5):
"""Computes a alpha-power normalization for the matrix xx."""
return np.sign(xx) * np.abs(xx) ** alpha
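# Illustrative: with the default alpha=0.5, power_normalize(np.array([4., -9.]))
# yields [2., -3.], squashing large magnitudes while preserving sign.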
def readannos():
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
return actionIDs,taxonomy,database
def getnames():
fname = baseDir+'data/lists/gtnames.list'
with open(fname,'rb') as f:
lines = f.readlines()
names = []
for name in lines:
name = name.rstrip('\n')
names.append(name)
# print names
return names
def getpredications(subset,imgtype,weight,vidname):
predictionfile = '{}predictions/{}-{}-{}/{}.list'.format(baseDir,subset,imgtype,str(weight).zfill(5),vidname)
with open(predictionfile) as f:
lines = f.readlines()
preds = np.zeros((201,len(lines)),dtype = 'float32')
labels = np.zeros(len(lines))
lcount = 0;
for line in lines:
splitedline = line.split(' ');
labels[lcount] = int(splitedline[0])
wcount = 0;
# print 'line length ', len(splitedline)
# print splitedline
for word in splitedline[1:-1]:
# print word,
preds[wcount,lcount] = float(word)
wcount+=1
lcount +=1
return labels,preds
def gettopklabel(preds,k,classtopk):
scores = np.zeros(200)
topk = min(classtopk,np.shape(preds)[1]);
for i in range(200):
values = preds[i,:];
values = np.sort(values);
values = values[::-1]
scores[i] = np.mean(values[:topk])
# print scores
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
return sortedlabel[:k],sortedscores[:k]
def readpkl(filename):
with open(filename) as f:
data = pickle.load(f)
return data
def getdataVal(database,indexs,gtlabels,subset,featType):
if featType == 'MBH':
filename = baseDir+'data/MBH_Videos_features.hdf5';
x = np.zeros((18000,65536))
else:
filename = baseDir+'data/ImageNetShuffle2016_features.hdf5';
x = np.zeros((18000,1024))
file = h5py.File(filename,'r')
features = file['features']
#print np.shape(features)
count = 0;
y = np.zeros(18000)
#features = power_normalize(features)
for videoId in database.keys():
videoInfo = database[videoId]
if not videoInfo['subset'] == 'testing':
vkey = 'v_'+videoId;
ind = indexs[vkey]
label = gtlabels[videoId]
#feat = features[ind,:]
x[count,:] = features[ind,:];
y[count] = label
count+=1
file.close()
return x[:count],y[:count]
def processMBHval():
for featType in ['MBH']:
names = getnames()
gtlabels = readpkl('{}data/labels.pkl'.format(baseDir))
indexs = readpkl('{}data/indexs.pkl'.format(baseDir))
actionIDs,taxonomy,database = readannos()
print 'getting training data.... ',
xtrain,ytrain = getdataVal(database,indexs,gtlabels,'training',featType)
print 'got it!! and shape is ',np.shape(xtrain)
#print 'getting validation data.... ',
#xval,yval = getdata(database,indexs,gtlabels,'validation',featType)
#print 'got it!! and shape is ',np.shape(xval)
if featType == 'IMS':
jobs = 16
c = 0.01;
else:
jobs = 16
c = 10;
clf = LinearSVC(C = c)
clf = clf.fit(xtrain, ytrain)
saveName = '{}data/train-valSVM-{}.pkl'.format(baseDir,featType)
with open(saveName,'w') as f:
pickle.dump(clf,f)
if __name__=="__main__":
#processPredictions()
processMBHval()
| gurkirt/actNet-inAct | processing/saveCLFs.py | Python | mit | 4,517 |
import colorsys
import sys
import xml.etree.cElementTree as ET
# from io import BytesIO
from gi.repository import Gtk, Gdk, GObject, Pango
from gi.repository.GdkPixbuf import Pixbuf
from pychess.System import conf
from pychess.System.Log import log
from pychess.System.prefix import addDataPrefix
def createCombo(combo, data=[], name=None, ellipsize_mode=None):
if name is not None:
combo.set_name(name)
lst_store = Gtk.ListStore(Pixbuf, str)
for row in data:
lst_store.append(row)
combo.clear()
combo.set_model(lst_store)
crp = Gtk.CellRendererPixbuf()
crp.set_property('xalign', 0)
crp.set_property('xpad', 2)
combo.pack_start(crp, False)
combo.add_attribute(crp, 'pixbuf', 0)
crt = Gtk.CellRendererText()
crt.set_property('xalign', 0)
crt.set_property('xpad', 4)
combo.pack_start(crt, True)
combo.add_attribute(crt, 'text', 1)
if ellipsize_mode is not None:
crt.set_property('ellipsize', ellipsize_mode)
def updateCombo(combo, data):
def get_active(combobox):
model = combobox.get_model()
active = combobox.get_active()
if active < 0:
return None
return model[active][1]
last_active = get_active(combo)
lst_store = combo.get_model()
lst_store.clear()
new_active = 0
for i, row in enumerate(data):
lst_store.append(row)
if last_active == row[1]:
new_active = i
combo.set_active(new_active)
def genColor(n, startpoint=0):
assert n >= 1
# This splits the 0 - 1 segment in the pizza way
hue = (2 * n - 1) / (2.**(n - 1).bit_length()) - 1
hue = (hue + startpoint) % 1
# We set saturation based on the amount of green, scaled to the interval
# [0.6..0.8]. This ensures a consistent lightness over all colors.
rgb = colorsys.hsv_to_rgb(hue, 1, 1)
rgb = colorsys.hsv_to_rgb(hue, 1, (1 - rgb[1]) * 0.2 + 0.6)
# This algorithm ought to balance colors more precisely, but it overrates
# the lightness of yellow, and nearly makes it black
# yiq = colorsys.rgb_to_yiq(*rgb)
# rgb = colorsys.yiq_to_rgb(.125, yiq[1], yiq[2])
return rgb
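# Illustrative: with startpoint=0 the hues for n = 1, 2, 3, 4, 5, ... come out
# as 0, 1/2, 1/4, 3/4, 1/8, ..., successively subdividing the hue circle - the
# "pizza" split mentioned above.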
def keepDown(scrolledWindow):
def changed(vadjust):
if not hasattr(vadjust, "need_scroll") or vadjust.need_scroll:
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
vadjust.need_scroll = True
scrolledWindow.get_vadjustment().connect("changed", changed)
def value_changed(vadjust):
vadjust.need_scroll = abs(vadjust.get_value() + vadjust.get_page_size() -
vadjust.get_upper()) < vadjust.get_step_increment()
scrolledWindow.get_vadjustment().connect("value-changed", value_changed)
# wrap analysis text column. thanks to
# http://www.islascruz.org/html/index.php?blog/show/Wrap-text-in-a-TreeView-column.html
def appendAutowrapColumn(treeview, name, **kvargs):
cell = Gtk.CellRendererText()
# cell.props.wrap_mode = Pango.WrapMode.WORD
# TODO:
# changed to ellipsize instead until "never ending grow" bug gets fixed
# see https://github.com/pychess/pychess/issues/1054
cell.props.ellipsize = Pango.EllipsizeMode.END
column = Gtk.TreeViewColumn(name, cell, **kvargs)
treeview.append_column(column)
def callback(treeview, allocation, column, cell):
otherColumns = [c for c in treeview.get_columns() if c != column]
newWidth = allocation.width - sum(c.get_width() for c in otherColumns)
hsep = GObject.Value()
hsep.init(GObject.TYPE_INT)
hsep.set_int(0)
treeview.style_get_property("horizontal-separator", hsep)
newWidth -= hsep.get_int() * (len(otherColumns) + 1) * 2
if cell.props.wrap_width == newWidth or newWidth <= 0:
return
cell.props.wrap_width = newWidth
store = treeview.get_model()
store_iter = store.get_iter_first()
while store_iter and store.iter_is_valid(store_iter):
store.row_changed(store.get_path(store_iter), store_iter)
store_iter = store.iter_next(store_iter)
treeview.set_size_request(0, -1)
# treeview.connect_after("size-allocate", callback, column, cell)
scroll = treeview.get_parent()
if isinstance(scroll, Gtk.ScrolledWindow):
scroll.set_policy(Gtk.PolicyType.NEVER, scroll.get_policy()[1])
return cell
METHODS = (
# Gtk.SpinButton should be listed prior to Gtk.Entry, as it is a
# subclass, but requires different handling
(Gtk.SpinButton, ("get_value", "set_value", "value-changed")),
(Gtk.Entry, ("get_text", "set_text", "changed")),
(Gtk.Expander, ("get_expanded", "set_expanded", "notify::expanded")),
(Gtk.ComboBox, ("get_active", "set_active", "changed")),
(Gtk.IconView, ("_get_active", "_set_active", "selection-changed")),
(Gtk.ToggleButton, ("get_active", "set_active", "toggled")),
(Gtk.CheckMenuItem, ("get_active", "set_active", "toggled")),
(Gtk.Range, ("get_value", "set_value", "value-changed")),
(Gtk.TreeSortable, ("get_value", "set_value", "sort-column-changed")),
(Gtk.Paned, ("get_position", "set_position", "notify::position")),
)
def keep(widget, key, get_value_=None, set_value_=None): # , first_value=None):
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % key)
for class_, methods_ in METHODS:
        # Use try-except just to make sphinx happy...
try:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
except TypeError:
getter, setter, signal = methods_
break
else:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if set_value_:
def set_value(v):
return set_value_(widget, v)
else:
set_value = getattr(widget, setter)
def setFromConf():
try:
v = conf.get(key)
except TypeError:
log.warning("uistuff.keep.setFromConf: Key '%s' from conf had the wrong type '%s', ignored" %
(key, type(conf.get(key))))
# print("uistuff.keep TypeError %s %s" % (key, conf.get(key)))
else:
set_value(v)
def callback(*args):
if not conf.hasKey(key) or conf.get(key) != get_value():
conf.set(key, get_value())
widget.connect(signal, callback)
conf.notify_add(key, lambda *args: setFromConf())
if conf.hasKey(key):
setFromConf()
elif conf.get(key) is not None:
conf.set(key, conf.get(key))
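# Illustrative use (widget and key names here are hypothetical):
#     keep(widgets["showCoordinates"], "showCoordinates")
# binds the widget's value to the conf key in both directions: user edits are
# written to conf, and conf changes are pushed back into the widget.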
# loadDialogWidget() and saveDialogWidget() are similar to uistuff.keep() but are needed
# for saving widget values for Gtk.Dialog instances that are loaded with different
# sets of values/configurations and which also aren't instant save like in
# uistuff.keep(), but rather are saved later if and when the user clicks
# the dialog's OK button
def loadDialogWidget(widget,
widget_name,
config_number,
get_value_=None,
set_value_=None,
first_value=None):
key = widget_name + "-" + str(config_number)
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % widget_name)
for class_, methods_ in METHODS:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
else:
if set_value_ is None:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if set_value_:
def set_value(v):
return set_value_(widget, v)
else:
set_value = getattr(widget, setter)
if conf.hasKey(key):
try:
v = conf.get(key)
except TypeError:
log.warning("uistuff.loadDialogWidget: Key '%s' from conf had the wrong type '%s', ignored" %
(key, type(conf.get(key))))
if first_value is not None:
conf.set(key, first_value)
else:
conf.set(key, get_value())
else:
set_value(v)
elif first_value is not None:
conf.set(key, first_value)
set_value(conf.get(key))
else:
log.warning("Didn't load widget \"%s\": no conf value and no first_value arg" % widget_name)
def saveDialogWidget(widget, widget_name, config_number, get_value_=None):
key = widget_name + "-" + str(config_number)
if widget is None:
raise AttributeError("key '%s' isn't in widgets" % widget_name)
for class_, methods_ in METHODS:
if isinstance(widget, class_):
getter, setter, signal = methods_
break
else:
if get_value_ is None:
raise AttributeError("I don't have any knowledge of type: '%s'" %
widget)
if get_value_:
def get_value():
return get_value_(widget)
else:
get_value = getattr(widget, getter)
if not conf.hasKey(key) or conf.get(key) != get_value():
conf.set(key, get_value())
POSITION_NONE, POSITION_CENTER, POSITION_GOLDEN = range(3)
def keepWindowSize(key,
window,
defaultSize=None,
defaultPosition=POSITION_NONE):
""" You should call keepWindowSize before show on your windows """
key = key + "window"
def savePosition(window, *event):
log.debug("keepWindowSize.savePosition: %s" % window.get_title())
width = window.get_allocation().width
height = window.get_allocation().height
x_loc, y_loc = window.get_position()
if width <= 0:
log.error("Setting width = '%d' for %s to conf" % (width, key))
if height <= 0:
log.error("Setting height = '%d' for %s to conf" % (height, key))
log.debug("Saving window position width=%s height=%s x=%s y=%s" %
(width, height, x_loc, y_loc))
conf.set(key + "_width", width)
conf.set(key + "_height", height)
conf.set(key + "_x", x_loc)
conf.set(key + "_y", y_loc)
return False
window.connect("delete-event", savePosition, "delete-event")
def loadPosition(window):
# log.debug("keepWindowSize.loadPosition: %s" % window.title)
# Just to make sphinx happy...
try:
width, height = window.get_size_request()
except TypeError:
pass
if conf.hasKey(key + "_width") and conf.hasKey(key + "_height"):
width = conf.get(key + "_width")
height = conf.get(key + "_height")
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif defaultSize:
width, height = defaultSize
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif key == "mainwindow":
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
log.debug("Resizing window to width=%s height=%s" %
(width, height))
window.resize(width, height)
elif key == "preferencesdialogwindow":
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
window.resize(1, 1)
else:
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
width = int(monitor_width / 2)
height = int(monitor_height / 4) * 3
if conf.hasKey(key + "_x") and conf.hasKey(key + "_y"):
x = max(0, conf.get(key + "_x"))
y = max(0, conf.get(key + "_y"))
log.debug("Moving window to x=%s y=%s" % (x, y))
window.move(x, y)
elif defaultPosition in (POSITION_CENTER, POSITION_GOLDEN):
monitor_x, monitor_y, monitor_width, monitor_height = getMonitorBounds()
x_loc = int(monitor_width / 2 - width / 2) + monitor_x
if defaultPosition == POSITION_CENTER:
y_loc = int(monitor_height / 2 - height / 2) + monitor_y
else:
# Place the window on the upper golden ratio line
y_loc = int(monitor_height / 2.618 - height / 2) + monitor_y
log.debug("Moving window to x=%s y=%s" % (x_loc, y_loc))
window.move(x_loc, y_loc)
loadPosition(window)
# In rare cases, gtk throws some gtk_size_allocation error, which is
# probably a race condition. To avoid the window forgets its size in
# these cases, we add this extra hook
def callback(window):
loadPosition(window)
onceWhenReady(window, callback)
# Some properties can only be set, once the window is sufficiently initialized,
# This function lets you queue your request until that has happened.
def onceWhenReady(window, func, *args, **kwargs):
def cb(window, alloc, func, *args, **kwargs):
func(window, *args, **kwargs)
window.disconnect(handler_id)
handler_id = window.connect_after("size-allocate", cb, func, *args, **
kwargs)
def getMonitorBounds():
screen = Gdk.Screen.get_default()
root_window = screen.get_root_window()
# Just to make sphinx happy...
try:
ptr_window, mouse_x, mouse_y, mouse_mods = root_window.get_pointer()
current_monitor_number = screen.get_monitor_at_point(mouse_x, mouse_y)
monitor_geometry = screen.get_monitor_geometry(current_monitor_number)
return monitor_geometry.x, monitor_geometry.y, monitor_geometry.width, monitor_geometry.height
except TypeError:
return (0, 0, 0, 0)
def makeYellow(box):
def on_box_expose_event(box, context):
# box.style.paint_flat_box (box.window,
# Gtk.StateType.NORMAL, Gtk.ShadowType.NONE, None, box, "tooltip",
# box.allocation.x, box.allocation.y,
# box.allocation.width, box.allocation.height)
pass
def cb(box):
tooltip = Gtk.Window(Gtk.WindowType.POPUP)
tooltip.set_name('gtk-tooltip')
tooltip.ensure_style()
tooltipStyle = tooltip.get_style()
box.set_style(tooltipStyle)
box.connect("draw", on_box_expose_event)
onceWhenReady(box, cb)
class GladeWidgets:
""" A simple class that wraps a the glade get_widget function
into the python __getitem__ version """
def __init__(self, filename):
# TODO: remove this when upstream fixes translations with Python3+Windows
if sys.platform == "win32" and not conf.no_gettext:
tree = ET.parse(addDataPrefix("glade/%s" % filename))
for node in tree.iter():
if 'translatable' in node.attrib:
node.text = _(node.text)
del node.attrib['translatable']
if node.get('name') in ('pixbuf', 'logo'):
node.text = addDataPrefix("glade/%s" % node.text)
xml_text = ET.tostring(tree.getroot(), encoding='unicode', method='xml')
self.builder = Gtk.Builder.new_from_string(xml_text, -1)
else:
self.builder = Gtk.Builder()
if not conf.no_gettext:
self.builder.set_translation_domain("pychess")
self.builder.add_from_file(addDataPrefix("glade/%s" % filename))
def __getitem__(self, key):
return self.builder.get_object(key)
def getGlade(self):
return self.builder
| pychess/pychess | lib/pychess/System/uistuff.py | Python | gpl-3.0 | 16,160 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la configuration par défaut du module 'joueur'."""
cfg_joueur = r"""
# Ce fichier contient la configuration du module primaire joueur.
# Il contient diverses options en rapport avec la création d'un personnage.
## Taille du nom
# Cette variable correspond à la taille minimale d'un nom de personnage :
taille_min = 3
# Taille maximale d'un nom :
taille_max = 15
## Groupe par défaut
# Quand un joueur se crée, dans quel groupe doit-il être placé ?
# Rappel : les groupes déterminent les droits des joueurs à utiliser
# certaines commandes, ainsi que certains flags.
# Par défaut, trois groupes existent : "pnj", "joueur" et "administrateur"
# Les joueurs sont placés par défaut dans le groupe "joueur".
groupe_par_defaut = "joueur"
## Configuration du joueur système
# Le joueur système est un joueur créé par le système qui peut être amené
# à effectuer des tâches d'administration automatisées.
# On ne doit pas pouvoir se logger sur ce joueur, mais il peut servir
# à envoyer de façon automatisée des messages.
# Par exemple, quand une erreur survient lors de l'interprétation du scripting,
# c'est le joueur système qui envoie le message au bâtisseur pour l'en avertir.
# La variable ci-dessous configure le nom du compte système :
compte_systeme = "systeme"
# Nom du joueur système :
joueur_systeme = "système"
## Choix des contextes de création d'un joueur
# Quand un client veut créer un joueur, il passe par plusieurs
# étapes (appelées contextes) qui lui permettent de sélectionner
# différentes informations sur le joueur (son nom, race, genre,
# etc). Vous pouvez changer l'ordre des contextes de création dans
# cette configuration en éditant la liste suivante. Précisez le nom
# des contextes tels qu'indiqués ci-dessous :
# "choix_genre" : choix du genre (doit venir après "choix_race")
# "choix_race" : choix de la race
# "langue_cmd" : choix de la langue des commandes
# "nouveau_nom" : choix du nom du joueur
# "presenter_tips" : présentation des messages tips à suivre
ordre_creation = ["nouveau_nom", "langue_cmd", "choix_race", "choix_genre"]
"""
| vlegoff/tsunami | src/primaires/joueur/config.py | Python | bsd-3-clause | 3,710 |
# -*- coding: utf-8 -*-
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from __future__ import print_function
# pylint: disable=W0141
from functools import partial
import numpy as np
from pandas._libs import lib
from pandas._libs.tslibs import NaT, iNaT, Timestamp, Timedelta
from pandas._libs.tslib import format_array_from_datetime
from pandas import compat
from pandas.compat import StringIO, lzip, map, zip, u
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_float_dtype,
is_period_arraylike,
is_integer_dtype,
is_interval_dtype,
is_datetimetz,
is_integer,
is_float,
is_scalar,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.core.dtypes.generic import ABCSparseArray, ABCMultiIndex
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.index import Index, ensure_index
from pandas.core.config import get_option, set_option
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.io.formats.terminal import get_terminal_size
from pandas.io.common import _expand_user, _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
"""
_VALID_JUSTIFY_PARAMETERS = ("left", "right", "center", "justify",
"justify-all", "start", "end", "inherit",
"match-parent", "initial", "unset")
return_docstring = """
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
"""
docstring_to_string = common_docstring + return_docstring
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: {length}".format(length=len(self.categorical))
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None, na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = [u('{i}').format(i=i) for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[') + result + u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num],
series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
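    # Illustrative: with max_rows=4 on a 10-row Series, _chk_truncate keeps
    # rows [0:2] and [-2:], and tr_row_num=2 marks where the '...' row is
    # later inserted by to_string().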
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ((u"Name: {sname}".format(sname=series_name))
if name is not None else "")
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
footer += 'Length: {length}'.format(length=len(self.series))
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += u'dtype: {typ}'.format(typ=pprint_thing(name))
        # level info is added at the end, on a new line, as is done
        # for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, ABCMultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
values_to_format = self.tr_series._formatting_values()
return format_array(values_to_format, None,
float_format=self.float_format, na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values)
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
class TextAdjustment(object):
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return compat.strlen(text, encoding=self.encoding)
def justify(self, texts, max_len, mode='right'):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super(EastAsianTextAdjustment, self).__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
def len(self, text):
return compat.east_asian_len(text, encoding=self.encoding,
ambiguous_width=self.ambiguous_width)
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
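# Illustrative sketch (editor addition): the padding arithmetic used by
# EastAsianTextAdjustment.justify above. str.rjust counts code points, but a
# wide character occupies two terminal columns, hence the corrected pad width
# max_len - display_len(t) + len(t).
def _demo_east_asian_padding():
    from unicodedata import east_asian_width
    def display_len(s):
        # 'W' (wide) and 'F' (fullwidth) characters take two columns
        return sum(2 if east_asian_width(c) in 'WF' else 1 for c in s)
    text, max_len = u'\u30d1\u30f3\u30c0', 10  # katakana 'panda', 6 columns wide
    pad = max_len - display_len(text) + len(text)  # 10 - 6 + 3 == 7
    return text.rjust(pad)  # renders as exactly 10 terminal columns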
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return (self.show_dimensions is True or
(self.show_dimensions == 'truncate' and self.is_truncated))
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.',
table_id=None, **kwds):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
self.table_id = table_id
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
        # Column whose first element is used to determine the width of a dot column
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
        if max_cols == 0 or max_rows == 0:  # assume we are in the terminal
            # (why else would they be 0?)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = (self.header + dot_row + show_dimension_rows +
prompt_row)
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num],
frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :],
frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
        # may include level names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'
.format(ncols=len(self.columns),
nalias=len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=header_colwidth,
adj=self.adj)
max_len = max(max(self.adj.len(x) for x in fmt_values),
header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
# infer from column header
col_width = self.adj.len(strcols[self.tr_size_col][0])
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
(len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = self.adj.len(strcols[self.tr_size_col][0])
dot_mode = 'center'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=pprint_thing(frame.columns),
idx=pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (not isinstance(self.max_cols, int) or
self.max_cols > 0): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
max_len = Series(text).str.len().max()
headers = [ele[0] for ele in strcols]
                # Size of last col determines dot col size. See
                # `self._to_str_columns`.
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row
# plus truncate dot col
dif = max_len - self.w
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).apply(len).max()
for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= (col_len + 1)
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_adj = n_cols - self.index
# GH-21180. Ensure that we print at least two.
max_cols_adj = max(max_cols_adj, 2)
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[{nrows} rows x {ncols} columns]"
.format(nrows=len(frame), ncols=len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x)
for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max() if
len(col) > 0 else 0 for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False, encoding=None,
multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
import codecs
with codecs.open(self.buf, 'w', encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(values_to_format, formatter,
float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space, decimal=self.decimal)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook,
border=border,
table_id=self.table_id)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, ABCMultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any(l.is_floating for l in columns.levels)
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (y not in self.formatters and
need_leadsp[x] and not restrict_formatting):
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x]
for x in fmt_columns]))
if self.sparsify and len(str_columns):
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, ABCMultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names, formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj)) for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['{x}'.format(x=x)
for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, ABCMultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
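# Illustrative sketch (editor addition): the vertical truncation implemented
# by _chk_truncate above, observed through the public API. With max_rows=4,
# two head and two tail rows remain around a dot row.
def _demo_vertical_truncation():
    import pandas as pd
    df = pd.DataFrame({'a': range(10)})
    return df.to_string(max_rows=4)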
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.'):
if is_categorical_dtype(values):
fmt_klass = CategoricalArrayFormatter
elif is_interval_dtype(values):
fmt_klass = IntervalArrayFormatter
elif is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_period_arraylike(values):
fmt_klass = PeriodArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif is_datetimetz(values):
fmt_klass = Datetime64TZFormatter
elif is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
quoting=None, fixed_width=True):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = ('{{x: .{prec:d}g}}'
.format(prec=get_option("display.precision")))
float_format = lambda x: fmt_str.format(x=x)
else:
float_format = self.float_format
formatter = (
self.formatter if self.formatter is not None else
(lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n'))))
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
if x is None:
return 'None'
elif x is NaT:
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return u'{x}'.format(x=x)
else:
# object dtype
return u'{x}'.format(x=formatter(x))
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notna(vals)
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(u' {v}'.format(v=_format(v)))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(u' {v}'.format(v=_format(v)))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# separate the wheat from the chaff
values = self.values
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = self.na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
return _trim_zeros(values, self.na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
        # we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
        # large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
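# Illustrative sketch (editor addition): the fixed-width fallback above in
# miniature. Under %f-style formatting with digits=6, 1e-10 collapses to zero,
# so the formatter retries with the %e-style format instead.
def _demo_float_fallback(value=1e-10, digits=6):
    fixed = '{value: .{digits:d}f}'.format(value=value, digits=digits)
    if float(fixed) == 0 and value != 0:  # precision was lost
        return '{value: .{digits:d}e}'.format(value=value, digits=digits)
    return fixed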
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '{x: d}'.format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values,
self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class IntervalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
formatter = self.formatter or str
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
from pandas.core.indexes.period import IncompatibleFrequency
try:
values = PeriodIndex(self.values).to_native_types()
except IncompatibleFrequency:
            # periods may contain different freqs
values = Index(self.values, dtype='object').to_native_types()
formatter = self.formatter or (lambda x: '{x}'.format(x=x))
fmt_values = [formatter(x) for x in values]
return fmt_values
class CategoricalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
fmt_values = format_array(self.values.get_values(), self.formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
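# Illustrative sketch (editor addition): the precision rule above worked by
# hand. For the percentiles 1.999 and 2.001 the smallest gap (including the
# distance to 0% and 100%) is 0.002, and -floor(log10(0.002)) == 3, so three
# decimals keep the two values distinct.
def _demo_percentile_precision():
    import numpy as np
    pcts = np.array([1.999, 2.001])  # already scaled to percent
    gaps = np.ediff1d(pcts, to_begin=pcts[0], to_end=100 - pcts[-1])
    prec = max(1, -int(np.floor(np.log10(np.min(gaps)))))
    return [('{:.%df}' % prec).format(p) + '%' for p in pcts]  # ['1.999%', '2.001%']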
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % int(one_day_nanos) != 0).sum() == 0
if even_days:
return True
return False
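# Illustrative sketch (editor addition): _is_dates_only drives the choice of
# '%Y-%m-%d' versus a full timestamp format further below. Midnight-only
# values count as dates; any intraday component (or a timezone) does not.
def _demo_is_dates_only():
    import pandas as pd
    assert _is_dates_only(pd.to_datetime(['2011-01-01', '2011-01-02']))
    assert not _is_dates_only(pd.to_datetime(['2011-01-01 09:30:00']))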
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = (self.formatter or
_get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box))
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
    Return a formatter function for a range of timedeltas.
    These will all have the same format argument.
    If box, then show the result in quotes.
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = None
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
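# Illustrative sketch (editor addition): the three regimes selected above.
# All-whole-day values keep the default repr, purely sub-day values use the
# compact 'sub_day' form, and mixed ranges fall back to 'long'.
def _demo_timedelta_formats():
    import pandas as pd
    even = pd.to_timedelta(['1 days', '2 days'])
    sub = pd.to_timedelta(['00:00:01', '00:00:02'])
    fmt_even = _get_format_timedelta64(even.asi8)
    fmt_sub = _get_format_timedelta64(sub.asi8)
    return fmt_even(even[0]), fmt_sub(sub[0])  # ('1 days', '00:00:01')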
def _make_fixed_width(strings, justify='right', minimum=None, adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = max(adj.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros(str_floats, na_rep='NaN'):
"""
    Trims zeros, leaving just one after the decimal point if need be.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all(x.endswith('0') for x in non_na) and
not (any(('e' in x) or ('E' in x) for x in non_na)))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
    # leave one 0 after the decimal point if need be.
return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed]
def _has_names(index):
if isinstance(index, ABCMultiIndex):
return com._any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return 'NaN'
if decimal.Decimal.is_infinite(dnum):
return 'inf'
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-{pow10:02d}'.format(pow10=-int_pow10)
else:
prefix = 'E+{pow10:02d}'.format(pow10=int_pow10)
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("{mant: g}{prefix}")
else:
format_str = (u("{{mant: .{acc:d}f}}{{prefix}}")
.format(acc=self.accuracy))
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted # .strip()
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
    Format float in engineering format. By accuracy, we mean the number of
    decimal digits after the decimal point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
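# Illustrative sketch (editor addition): _binify groups column widths into
# line-sized chunks for the wrapped repr. Each returned value is an exclusive
# end index of one output block.
def _demo_binify():
    # widths 10+1 and 10+1 fit in 25 columns; the third column starts a block
    assert _binify([10, 10, 10], line_width=25) == [2, 3]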
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
        List of values for each level.
    sentinel : string, optional
        Value indicating that no new index starts there.
    Returns
    -------
    Returns a list of maps, one per level. Each map takes the position at
    which an index starts (key) to the length of that index (value).
    """
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
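# Illustrative sketch (editor addition): with the default sentinel, a repeated
# blank means "same group as the previous label", so 'a' at position 0 spans
# two rows in the first level below.
def _demo_level_lengths():
    levels = [['a', '', 'b'], ['x', 'y', 'z']]
    assert get_level_lengths(levels) == [{0: 2, 2: 1}, {0: 1, 1: 1, 2: 1}]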
def buffer_put_lines(buf, lines):
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
| harisbal/pandas | pandas/io/formats/format.py | Python | bsd-3-clause | 54,799 |
from __future__ import absolute_import
import logging
from sentry.api.base import Endpoint
from sentry.api.bases.project import ProjectPermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.app import raven
from sentry.models import Group, get_group_with_redirect
logger = logging.getLogger(__name__)
class GroupPermission(ProjectPermission):
scope_map = {
'GET': ['event:read', 'event:write', 'event:delete'],
'POST': ['event:write', 'event:delete'],
'PUT': ['event:write', 'event:delete'],
'DELETE': ['event:delete'],
}
def has_object_permission(self, request, view, group):
return super(GroupPermission, self).has_object_permission(
request, view, group.project)
class GroupEndpoint(Endpoint):
permission_classes = (GroupPermission,)
def convert_args(self, request, issue_id, *args, **kwargs):
# TODO(tkaemming): Ideally, this would return a 302 response, rather
# than just returning the data that is bound to the new group. (It
# technically shouldn't be a 301, since the response could change again
# as the result of another merge operation that occurs later. This
# wouldn't break anything though -- it will just be a "permanent"
# redirect to *another* permanent redirect.) This would require
# rebuilding the URL in one of two ways: either by hacking it in with
# string replacement, or making the endpoint aware of the URL pattern
# that caused it to be dispatched, and reversing it with the correct
# `issue_id` keyword argument.
try:
group, _ = get_group_with_redirect(
issue_id,
queryset=Group.objects.select_related('project'),
)
except Group.DoesNotExist:
raise ResourceDoesNotExist
self.check_object_permissions(request, group)
raven.tags_context({
'project': group.project_id,
'organization': group.project.organization_id,
})
kwargs['group'] = group
return (args, kwargs)
| alexm92/sentry | src/sentry/api/bases/group.py | Python | bsd-3-clause | 2,127 |
#issue in tensortools
from time import sleep, time
import chi
import tensortools as tt
import chi.rl.wrappers
import gym
import numpy as np
import tensorflow as tf
from tensortools import Function
from chi.rl.memory import ReplayMemory
from chi.rl.core import Agent
from chi.rl.memory import ShardedMemory
from chi.rl.wrappers import get_wrapper
from gym import wrappers
from gym.wrappers import Monitor
from tensorflow.contrib import layers
class DQN:
"""
An implementation of
Human Level Control through Deep Reinforcement Learning
http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
and
Deep Reinforcement Learning with Double Q-learning
https://arxiv.org/abs/1509.06461
"""
def __init__(self, n_actions, observation_shape, q_network: tt.Model, double_dqn=True,
replay_start=50000, clip_td=False, logdir="", clip_gradients=10):
self.logdir = logdir
self.replay_start = replay_start
self.n_actions = n_actions
self.observation_shape = observation_shape
self.memory = ShardedMemory()
self.discount = .99
self.step = 0
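        # NOTE (editor): the q_network defined below shadows the constructor
        # argument of the same name, so the network passed in is ignored here.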
@tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005), # TODO: replace with original weight freeze
optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
def q_network(x):
x /= 255
x = layers.conv2d(x, 32, 8, 4)
x = layers.conv2d(x, 64, 4, 2)
x = layers.conv2d(x, 64, 3, 1)
x = layers.flatten(x)
xv = layers.fully_connected(x, 512)
val = layers.fully_connected(xv, 1, activation_fn=None)
# val = tf.squeeze(val, 1)
xa = layers.fully_connected(x, 512)
            adv = layers.fully_connected(xa, n_actions, activation_fn=None)
q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
q = tf.identity(q, name='Q')
return q, x
def act(x: [observation_shape]):
qs = q_network(x)
a = tf.argmax(qs, axis=1)
# qm = tf.reduce_max(qs, axis=1)
return a, qs
self.act = Function(act)
def train_step(o: [observation_shape], a: (tf.int32, [[]]), r, t: tf.bool, o2: [observation_shape]):
q = q_network(o)
# ac = tf.argmax(q, axis=1)
# compute targets
q2 = q_network.tracked(o2)
if double_dqn:
a2 = tf.argmax(q_network(o2), axis=1) # yep, that's really the only difference
else:
a2 = tf.argmax(q2, axis=1)
mask2 = tf.one_hot(a2, n_actions, 1.0, 0.0, axis=1)
q_target = tf.where(t, r, r + self.discount * tf.reduce_sum(q2 * mask2, axis=1))
q_target = tf.stop_gradient(q_target)
# compute loss
mask = tf.one_hot(a, n_actions, 1.0, 0.0, axis=1)
qs = tf.reduce_sum(q * mask, axis=1, name='q_max')
td = tf.subtract(q_target, qs, name='td')
if clip_td:
td = tf.clip_by_value(td, -.5, .5, name='clipped_td')
# loss = tf.reduce_mean(tf.abs(td), axis=0, name='mae')
# loss = tf.where(tf.abs(td) < 1.0, 0.5 * tf.square(td), tf.abs(td) - 0.5, name='mse_huber')
loss = tf.reduce_mean(tf.square(td), axis=0, name='mse')
gav = q_network.compute_gradients(loss)
if clip_gradients:
gav = [(tf.clip_by_norm(g, clip_gradients), v) for g, v in gav]
loss_update = q_network.apply_gradients(gav)
# logging
layers.summarize_tensors([td, loss, r, o, a,
tf.subtract(o2, o, name='state_dif'),
tf.reduce_mean(tf.cast(t, tf.float32), name='frac_terminal'),
tf.subtract(tf.reduce_max(q, 1, True), q, name='av_advantage')])
# layers.summarize_tensors(chi.activations())
# layers.summarize_tensors(chi.gradients())
return loss_update
self.train_step = Function(train_step,
prefetch_fctn=lambda: self.memory.sample_batch()[:-1],
prefetch_capacity=10,
prefetch_threads=3)
        def log_weights():
v = q_network.trainable_variables()
# print(f'log weights {v}')
f = q_network.tracker_variables
# print(f'log weights EMA {f}')
difs = []
for g in v:
a = q_network.tracker.average(g)
difs.append(tf.subtract(g, a, name=f'ema/dif{g.name[:-2]}'))
layers.summarize_tensors(v + f + difs)
        self.log_weights = Function(log_weights, async=True)
def train(self, timesteps=10000000, tter=.25):
saver = tf.train.Saver(keep_checkpoint_every_n_hours=5)
# saver.restore()
debugged = False
wt = 0.
while self.step < timesteps:
if self.step % 50000 == 0:
saver.save(tt.get_session(), self.logdir + '/dqn_checkpoint', global_step=self.step)
train_debug = not debugged and self.memory.t > 512 # it is assumed the batch size is smaller than that
debugged = debugged or train_debug
curb = self.step > self.memory.t * tter
if (self.memory.t > self.replay_start and not curb) or train_debug:
if self.step % 500 == 0:
print(f"{self.step} steps of training after {self.memory.t} steps of experience (idle for {wt} s)")
wt = 0.
self.train_step()
if self.step % 50000 == 0:
self.log_weights()
self.step += 1
else:
sleep(.1)
wt += .1
def make_agent(self, test=False, memory_size=50000, name=None, logdir=None):
return Agent(self.agent(test, memory_size), name, logdir)
def agent(self, test=False, memory_size=50000):
if test:
def log_returns(rret: [], ret: [], qs, q_minus_ret, duration: []):
layers.summarize_tensors([rret, ret, qs, q_minus_ret, duration])
log_returns = Function(log_returns, async=True)
memory = None
else:
memory = ReplayMemory(memory_size, batch_size=None)
self.memory.children.append(memory)
t = 0
for ep in range(10000000000000):
done = False
annealing_time = 1000000
qs = []
unwrapped_rewards = []
rewards = []
ob = yield # get initial observation
annealing_factor = max(0, 1 - self.memory.t / annealing_time)
tt = 0
while not done:
# select actions according to epsilon-greedy policy
action, q = self.act(ob)
if not test and (self.step == 0 or np.random.rand() < 1 * annealing_factor + .1):
action = np.random.randint(0, self.n_actions)
qs.append(q[action])
meta = {'action_values': q}
if len(qs) > 1:
td = qs[-2] - (rewards[-1] - self.discount * qs[-1])
meta.update(td=td)
ob2, r, done, info = yield action, meta # return action and meta information and receive environment outputs
if not test:
memory.enqueue(ob, action, r, done, info)
ob = ob2
rewards.append(r)
unwrapped_rewards.append(info.get('unwrapped_reward', r))
t += 1
tt += 1
if test:
wrapped_return = sum(rewards)
unwrapped_return = sum(unwrapped_rewards)
discounted_returns = [sum(rewards[i:] * self.discount ** np.arange(len(rewards)-i)) for i, _ in enumerate(rewards)]
q_minus_ret = np.subtract(qs, discounted_returns)
log_returns(unwrapped_return, wrapped_return, qs, q_minus_ret, tt)
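# Illustrative sketch (editor addition): the double-DQN target from train_step
# in plain numpy. The online network selects the greedy action; the tracked
# (target) network evaluates it. Vanilla DQN uses the target net for both.
def _demo_double_dqn_target(q_online2, q_target2, r, done, discount=.99):
    a2 = np.argmax(q_online2, axis=1)           # selection: online net
    q_eval = q_target2[np.arange(len(a2)), a2]  # evaluation: target net
    return np.where(done, r, r + discount * q_eval)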
def deep_q_network(n_actions):
""" Architecture according to:
http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
"""
@tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005), # TODO: replace with original weight freeze
optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
def q_network(x):
x /= 255
x = layers.conv2d(x, 32, 8, 4)
x = layers.conv2d(x, 64, 4, 2)
x = layers.conv2d(x, 64, 3, 1)
x = layers.flatten(x)
x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, n_actions, activation_fn=None)
x = tf.identity(x, name='Q')
return x
return q_network
def duelling_network(n_actions):
""" Architecture according to Duelling DQN:
https://arxiv.org/abs/1511.06581
"""
@tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005), # TODO: replace with original weight freeze
optimizer=tf.train.RMSPropOptimizer(6.25e-5, .95, .95, .01))
def q_network(x):
x /= 255
x = layers.conv2d(x, 32, 8, 4)
x = layers.conv2d(x, 64, 4, 2)
x = layers.conv2d(x, 64, 3, 1)
x = layers.flatten(x)
xv = layers.fully_connected(x, 512)
val = layers.fully_connected(xv, 1, activation_fn=None)
# val = tf.squeeze(val, 1)
xa = layers.fully_connected(x, 512)
        adv = layers.fully_connected(xa, n_actions, activation_fn=None)
q = val + adv - tf.reduce_mean(adv, axis=1, keep_dims=True)
q = tf.identity(q, name='Q')
return q
# Tests
def dqn_test(env='OneRoundDeterministicReward-v0'):
def make_env(env=env):
e = gym.make(env)
e = ObservationShapeWrapper(e)
return e
env = make_env()
env_test = make_env()
@tt.model(tracker=tf.train.ExponentialMovingAverage(1-.01),
optimizer=tf.train.AdamOptimizer(.001))
def q_network(x):
x = layers.fully_connected(x, 32)
x = layers.fully_connected(x, env.action_space.n, activation_fn=None,
weights_initializer=tf.random_normal_initializer(0, 1e-4))
return x
dqn = DQN(env.action_space.n, env.observation_space.shape, q_network)
agent = dqn.make_agent()
agent_test = dqn.make_agent(test=True)
for ep in range(4000):
r = agent.run_episode(env)
if ep > 64:
dqn.train_step()
if ep % 100 == 0:
rs = [agent_test.run_episode(env) for _ in range(100)]
print(f'Return after episode {ep} is {sum(rs)/len(rs)}')
def test_dqn():
with tf.Graph().as_default(), tf.Session().as_default():
dqn_test() # optimal return = 1
with tf.Graph().as_default(), tf.Session().as_default():
dqn_test('OneRoundNondeterministicReward-v0') # optimal return = 1
with tf.Graph().as_default(), tf.Session().as_default():
dqn_test('TwoRoundDeterministicReward-v0') # optimal return = 3
# Test Utils
class ObservationShapeWrapper(gym.ObservationWrapper):
def __init__(self, env):
from gym.spaces import Box
super().__init__(env)
self.observation_space = Box(1, 1, [1])
def _observation(self, observation):
return [observation]
if __name__ == '__main__':
# chi.chi.tf_debug = True
test_dqn()
| rmst/chi | chi/rl/dqn_m.py | Python | mit | 11,810 |
"""
Email message and email sending related helper functions.
"""
import socket
# Cache the hostname, but do it lazily: socket.getfqdn() can take a couple of
# seconds, which slows down the restart of the server.
class CachedDnsName(object):
def __str__(self):
return self.get_fqdn()
def get_fqdn(self):
if not hasattr(self, '_fqdn'):
self._fqdn = socket.getfqdn()
return self._fqdn
DNS_NAME = CachedDnsName()
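# Illustrative sketch (editor addition): first use triggers the getfqdn()
# lookup; later uses hit the cached value.
def _demo_dns_name():
    first = str(DNS_NAME)   # performs the lookup
    second = str(DNS_NAME)  # served from the cache
    return first == second  # always True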
| BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/core/mail/utils.py | Python | mit | 459 |
#!/usr/bin/env python
'''Menu
This example demonstrates the use of various menu types in gtk. It
demonstrates the new submenu navigation and scrolling menu features of
gtk 2.0.'''
import gtk
def create_menu(depth, length=5):
if depth < 1:
return None
menu = gtk.Menu()
    group = None
for i in range(length):
menuitem = gtk.RadioMenuItem(group, 'item %2d - %d' % (depth, i))
group = menuitem
menu.add(menuitem)
menuitem.show()
if depth > 1:
submenu = create_menu(depth - 1)
menuitem.set_submenu(submenu)
return menu
class MenuDemo(gtk.Window):
def __init__(self, parent=None):
# Create the toplevel window
gtk.Window.__init__(self)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.set_title(self.__class__.__name__)
vbox = gtk.VBox()
self.add(vbox)
menubar = gtk.MenuBar()
vbox.pack_start(menubar, expand=False)
menuitem = gtk.MenuItem('test\nline2')
menuitem.set_submenu(create_menu(2, 50))
menubar.add(menuitem)
menuitem = gtk.MenuItem('foo')
menuitem.set_submenu(create_menu(2))
menubar.add(menuitem)
menuitem = gtk.MenuItem('bar')
menuitem.set_submenu(create_menu(2))
menuitem.set_right_justified(True)
menubar.add(menuitem)
vbox2 = gtk.VBox(spacing=10)
vbox2.set_border_width(10)
vbox.pack_start(vbox2)
combo_box = gtk.combo_box_new_text()
combo_box.set_wrap_width(2)
for i in range(50):
combo_box.append_text('item - %d' % i)
combo_box.set_active(0)
vbox2.pack_start(combo_box)
separator = gtk.HSeparator()
vbox.pack_start(separator, expand=False)
vbox2 = gtk.VBox(spacing=10)
vbox2.set_border_width(10)
vbox.pack_start(vbox2, expand=False)
button = gtk.Button('close')
button.connect('clicked', lambda button, w=self: w.destroy())
vbox2.pack_start(button)
button.set_flags(gtk.CAN_DEFAULT)
button.grab_default()
self.show_all()
def main():
MenuDemo()
gtk.main()
if __name__ == '__main__':
main()
| chriskmanx/qmole | QMOLEDEV/pygtk-2.16.0/examples/pygtk-demo/demos/menu.py | Python | gpl-3.0 | 2,341 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import struct
import json
from webob import Response
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import WSGIApplication
from ryu.base import app_manager
from ryu.controller import dpset
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.exception import OFPUnknownVersion
from ryu.exception import RyuException
from ryu.lib import dpid as dpid_lib
from ryu.lib import hub
from ryu.lib import mac as mac_lib
from ryu.lib import addrconv
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import icmp
from ryu.lib.packet import ipv4
from ryu.lib.packet import packet
from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.lib.packet import vlan
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
#=============================
# REST API
#=============================
#
# Note: specify switch and vlan group, as follows.
# {switch_id} : 'all' or switchID
# {vlan_id} : 'all' or vlanID
#
#
## 1. get address data and routing data.
#
# * get data of no vlan
# GET /router/{switch_id}
#
# * get data of specific vlan group
# GET /router/{switch_id}/{vlan_id}
#
#
## 2. set address data or routing data.
#
# * set data of no vlan
# POST /router/{switch_id}
#
# * set data of specific vlan group
# POST /router/{switch_id}/{vlan_id}
#
# case1: set address data.
# parameter = {"address": "A.B.C.D/M"}
# case2-1: set static route.
# parameter = {"destination": "A.B.C.D/M", "gateway": "E.F.G.H"}
# case2-2: set default route.
# parameter = {"gateway": "E.F.G.H"}
#
#
## 3. delete address data or routing data.
#
# * delete data of no vlan
# DELETE /router/{switch_id}
#
# * delete data of specific vlan group
# DELETE /router/{switch_id}/{vlan_id}
#
# case1: delete address data.
# parameter = {"address_id": "<int>"} or {"address_id": "all"}
# case2: delete routing data.
# parameter = {"route_id": "<int>"} or {"route_id": "all"}
#
#
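# Illustrative usage (editor addition; host, port and addresses are
# assumptions -- ryu's WSGI server listens on 8080 by default, and the
# ``requests`` library is not a ryu dependency):
#
# import requests
# base = 'http://localhost:8080/router/0000000000000001'
# requests.post(base, data=json.dumps({'address': '172.16.10.10/24'}))
# requests.post(base, data=json.dumps({'destination': '10.0.0.0/8',
#                                      'gateway': '172.16.10.1'}))
# requests.get(base).json()
#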
UINT16_MAX = 0xffff
UINT32_MAX = 0xffffffff
UINT64_MAX = 0xffffffffffffffff
ETHERNET = ethernet.ethernet.__name__
VLAN = vlan.vlan.__name__
IPV4 = ipv4.ipv4.__name__
ARP = arp.arp.__name__
ICMP = icmp.icmp.__name__
TCP = tcp.tcp.__name__
UDP = udp.udp.__name__
MAX_SUSPENDPACKETS = 50  # Threshold for the number of suspended packet threads.
ARP_REPLY_TIMER = 2 # sec
OFP_REPLY_TIMER = 1.0 # sec
CHK_ROUTING_TBL_INTERVAL = 1800 # sec
SWITCHID_PATTERN = dpid_lib.DPID_PATTERN + r'|all'
VLANID_PATTERN = r'[0-9]{1,4}|all'
VLANID_NONE = 0
VLANID_MIN = 2
VLANID_MAX = 4094
COOKIE_DEFAULT_ID = 0
COOKIE_SHIFT_VLANID = 32
COOKIE_SHIFT_ROUTEID = 16
DEFAULT_ROUTE = '0.0.0.0/0'
IDLE_TIMEOUT = 1800 # sec
DEFAULT_TTL = 64
REST_COMMAND_RESULT = 'command_result'
REST_RESULT = 'result'
REST_DETAILS = 'details'
REST_OK = 'success'
REST_NG = 'failure'
REST_ALL = 'all'
REST_SWITCHID = 'switch_id'
REST_VLANID = 'vlan_id'
REST_NW = 'internal_network'
REST_ADDRESSID = 'address_id'
REST_ADDRESS = 'address'
REST_ROUTEID = 'route_id'
REST_ROUTE = 'route'
REST_DESTINATION = 'destination'
REST_GATEWAY = 'gateway'
PRIORITY_VLAN_SHIFT = 1000
PRIORITY_NETMASK_SHIFT = 32
PRIORITY_NORMAL = 0
PRIORITY_ARP_HANDLING = 1
PRIORITY_DEFAULT_ROUTING = 1
PRIORITY_MAC_LEARNING = 2
PRIORITY_STATIC_ROUTING = 2
PRIORITY_IMPLICIT_ROUTING = 3
PRIORITY_L2_SWITCHING = 4
PRIORITY_IP_HANDLING = 5
PRIORITY_TYPE_ROUTE = 'priority_route'
def get_priority(priority_type, vid=0, route=None):
log_msg = None
priority = priority_type
if priority_type == PRIORITY_TYPE_ROUTE:
assert route is not None
if route.dst_ip:
priority_type = PRIORITY_STATIC_ROUTING
priority = priority_type + route.netmask
log_msg = 'static routing'
else:
priority_type = PRIORITY_DEFAULT_ROUTING
priority = priority_type
log_msg = 'default routing'
if vid or priority_type == PRIORITY_IP_HANDLING:
priority += PRIORITY_VLAN_SHIFT
if priority_type > PRIORITY_STATIC_ROUTING:
priority += PRIORITY_NETMASK_SHIFT
if log_msg is None:
return priority
else:
return priority, log_msg
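# Illustrative sketch (editor addition): how the shifts above combine. A
# static /24 route on a vlan scores PRIORITY_STATIC_ROUTING(2) + netmask(24)
# + PRIORITY_VLAN_SHIFT(1000) == 1026.
def _demo_get_priority():
    class _Route(object):  # minimal stand-in for the real route entry
        dst_ip = '10.0.0.0'
        netmask = 24
    priority, log_msg = get_priority(PRIORITY_TYPE_ROUTE, vid=2, route=_Route())
    return priority, log_msg  # (1026, 'static routing')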
def get_priority_type(priority, vid):
if vid:
priority -= PRIORITY_VLAN_SHIFT
return priority
class NotFoundError(RyuException):
message = 'Router SW is not connected. : switch_id=%(switch_id)s'
class CommandFailure(RyuException):
pass
class RestRouterAPI(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION]
_CONTEXTS = {'dpset': dpset.DPSet,
'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(RestRouterAPI, self).__init__(*args, **kwargs)
# logger configure
RouterController.set_logger(self.logger)
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {'waiters': self.waiters}
mapper = wsgi.mapper
wsgi.registory['RouterController'] = self.data
requirements = {'switch_id': SWITCHID_PATTERN,
'vlan_id': VLANID_PATTERN}
# For no vlan data
path = '/router/{switch_id}'
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='get_data',
conditions=dict(method=['GET']))
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='set_data',
conditions=dict(method=['POST']))
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='delete_data',
conditions=dict(method=['DELETE']))
# For vlan data
path = '/router/{switch_id}/{vlan_id}'
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='get_vlan_data',
conditions=dict(method=['GET']))
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='set_vlan_data',
conditions=dict(method=['POST']))
mapper.connect('router', path, controller=RouterController,
requirements=requirements,
action='delete_vlan_data',
conditions=dict(method=['DELETE']))
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def datapath_handler(self, ev):
if ev.enter:
RouterController.register_router(ev.dp)
else:
RouterController.unregister_router(ev.dp)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
RouterController.packet_in_handler(ev.msg)
def _stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if (dp.id not in self.waiters
or msg.xid not in self.waiters[dp.id]):
return
event, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
if msg.flags & dp.ofproto.OFPSF_REPLY_MORE:
return
del self.waiters[dp.id][msg.xid]
event.set()
# for OpenFlow version1.0
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_0(self, ev):
self._stats_reply_handler(ev)
# for OpenFlow version1.2
@set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_2(self, ev):
self._stats_reply_handler(ev)
    # TODO: Update routing table when port status is changed.
# REST command template
def rest_command(func):
def _rest_command(*args, **kwargs):
try:
msg = func(*args, **kwargs)
return Response(content_type='application/json',
body=json.dumps(msg))
except SyntaxError as e:
status = 400
details = e.msg
except (ValueError, NameError) as e:
status = 400
details = e.message
except NotFoundError as msg:
status = 404
details = str(msg)
msg = {REST_RESULT: REST_NG,
REST_DETAILS: details}
return Response(status=status, body=json.dumps(msg))
return _rest_command
class RouterController(ControllerBase):
_ROUTER_LIST = {}
_LOGGER = None
def __init__(self, req, link, data, **config):
super(RouterController, self).__init__(req, link, data, **config)
self.waiters = data['waiters']
@classmethod
def set_logger(cls, logger):
cls._LOGGER = logger
cls._LOGGER.propagate = False
hdlr = logging.StreamHandler()
fmt_str = '[RT][%(levelname)s] switch_id=%(sw_id)s: %(message)s'
hdlr.setFormatter(logging.Formatter(fmt_str))
cls._LOGGER.addHandler(hdlr)
@classmethod
def register_router(cls, dp):
dpid = {'sw_id': dpid_lib.dpid_to_str(dp.id)}
try:
router = Router(dp, cls._LOGGER)
except OFPUnknownVersion as message:
cls._LOGGER.error(str(message), extra=dpid)
return
cls._ROUTER_LIST.setdefault(dp.id, router)
cls._LOGGER.info('Join as router.', extra=dpid)
@classmethod
def unregister_router(cls, dp):
if dp.id in cls._ROUTER_LIST:
cls._ROUTER_LIST[dp.id].delete()
del cls._ROUTER_LIST[dp.id]
dpid = {'sw_id': dpid_lib.dpid_to_str(dp.id)}
cls._LOGGER.info('Leave router.', extra=dpid)
@classmethod
def packet_in_handler(cls, msg):
dp_id = msg.datapath.id
if dp_id in cls._ROUTER_LIST:
router = cls._ROUTER_LIST[dp_id]
router.packet_in_handler(msg)
# GET /router/{switch_id}
@rest_command
def get_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'get_data', req.body)
# GET /router/{switch_id}/{vlan_id}
@rest_command
def get_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'get_data', req.body)
# POST /router/{switch_id}
@rest_command
def set_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'set_data', req.body)
# POST /router/{switch_id}/{vlan_id}
@rest_command
def set_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'set_data', req.body)
# DELETE /router/{switch_id}
@rest_command
def delete_data(self, req, switch_id, **_kwargs):
return self._access_router(switch_id, VLANID_NONE,
'delete_data', req.body)
# DELETE /router/{switch_id}/{vlan_id}
@rest_command
def delete_vlan_data(self, req, switch_id, vlan_id, **_kwargs):
return self._access_router(switch_id, vlan_id,
'delete_data', req.body)
def _access_router(self, switch_id, vlan_id, func, rest_param):
rest_message = []
routers = self._get_router(switch_id)
        # The request body is JSON (responses are built with json.dumps), so
        # parse it with json.loads rather than eval(), which would execute
        # arbitrary expressions supplied by the client.
        param = json.loads(rest_param) if rest_param else {}
for router in routers.values():
function = getattr(router, func)
data = function(vlan_id, param, self.waiters)
rest_message.append(data)
return rest_message
def _get_router(self, switch_id):
routers = {}
if switch_id == REST_ALL:
routers = self._ROUTER_LIST
else:
sw_id = dpid_lib.str_to_dpid(switch_id)
if sw_id in self._ROUTER_LIST:
routers = {sw_id: self._ROUTER_LIST[sw_id]}
if routers:
return routers
else:
raise NotFoundError(switch_id=switch_id)
class Router(dict):
def __init__(self, dp, logger):
super(Router, self).__init__()
self.dp = dp
self.dpid_str = dpid_lib.dpid_to_str(dp.id)
self.sw_id = {'sw_id': self.dpid_str}
self.logger = logger
self.port_data = PortData(dp.ports)
ofctl = OfCtl.factory(dp, logger)
cookie = COOKIE_DEFAULT_ID
# Set SW config: TTL error packet in (only OFPv1.2)
ofctl.set_sw_config_for_ttl()
# Set flow: ARP handling (packet in)
priority = get_priority(PRIORITY_ARP_HANDLING)
ofctl.set_packetin_flow(cookie, priority, dl_type=ether.ETH_TYPE_ARP)
self.logger.info('Set ARP handling (packet in) flow [cookie=0x%x]',
cookie, extra=self.sw_id)
# Set flow: L2 switching (normal)
priority = get_priority(PRIORITY_NORMAL)
ofctl.set_normal_flow(cookie, priority)
self.logger.info('Set L2 switching (normal) flow [cookie=0x%x]',
cookie, extra=self.sw_id)
# Set VlanRouter for vid=None.
vlan_router = VlanRouter(VLANID_NONE, dp, self.port_data, logger)
self[VLANID_NONE] = vlan_router
# Start cyclic routing table check.
self.thread = hub.spawn(self._cyclic_update_routing_tbl)
self.logger.info('Start cyclic routing table update.',
extra=self.sw_id)
def delete(self):
hub.kill(self.thread)
self.thread.wait()
self.logger.info('Stop cyclic routing table update.',
extra=self.sw_id)
def _get_vlan_router(self, vlan_id):
vlan_routers = []
if vlan_id == REST_ALL:
vlan_routers = self.values()
else:
vlan_id = int(vlan_id)
if (vlan_id != VLANID_NONE and
(vlan_id < VLANID_MIN or VLANID_MAX < vlan_id)):
msg = 'Invalid {vlan_id} value. Set [%d-%d]'
raise ValueError(msg % (VLANID_MIN, VLANID_MAX))
elif vlan_id in self:
vlan_routers = [self[vlan_id]]
return vlan_routers
def _add_vlan_router(self, vlan_id):
vlan_id = int(vlan_id)
if vlan_id not in self:
vlan_router = VlanRouter(vlan_id, self.dp, self.port_data,
self.logger)
self[vlan_id] = vlan_router
return self[vlan_id]
def _del_vlan_router(self, vlan_id, waiters):
# Remove unnecessary VlanRouter.
if vlan_id == VLANID_NONE:
return
vlan_router = self[vlan_id]
if (len(vlan_router.address_data) == 0
and len(vlan_router.routing_tbl) == 0):
vlan_router.delete(waiters)
del self[vlan_id]
def get_data(self, vlan_id, dummy1, dummy2):
vlan_routers = self._get_vlan_router(vlan_id)
if vlan_routers:
msgs = [vlan_router.get_data() for vlan_router in vlan_routers]
else:
msgs = [{REST_VLANID: vlan_id}]
return {REST_SWITCHID: self.dpid_str,
REST_NW: msgs}
def set_data(self, vlan_id, param, waiters):
vlan_routers = self._get_vlan_router(vlan_id)
if not vlan_routers:
vlan_routers = [self._add_vlan_router(vlan_id)]
msgs = []
for vlan_router in vlan_routers:
try:
msg = vlan_router.set_data(param)
msgs.append(msg)
if msg[REST_RESULT] == REST_NG:
# Data setting is failure.
self._del_vlan_router(vlan_router.vlan_id, waiters)
except ValueError as err_msg:
# Data setting is failure.
self._del_vlan_router(vlan_router.vlan_id, waiters)
raise err_msg
return {REST_SWITCHID: self.dpid_str,
REST_COMMAND_RESULT: msgs}
def delete_data(self, vlan_id, param, waiters):
msgs = []
vlan_routers = self._get_vlan_router(vlan_id)
if vlan_routers:
for vlan_router in vlan_routers:
msg = vlan_router.delete_data(param, waiters)
if msg:
msgs.append(msg)
# Check unnecessary VlanRouter.
self._del_vlan_router(vlan_router.vlan_id, waiters)
if not msgs:
msgs = [{REST_RESULT: REST_NG,
                     REST_DETAILS: 'No data found.'}]
return {REST_SWITCHID: self.dpid_str,
REST_COMMAND_RESULT: msgs}
def packet_in_handler(self, msg):
pkt = packet.Packet(msg.data)
#TODO: Packet library convert to string
#self.logger.debug('Packet in = %s', str(pkt), self.sw_id)
header_list = dict((p.protocol_name, p)
for p in pkt.protocols if type(p) != str)
if header_list:
# Check vlan-tag
vlan_id = VLANID_NONE
if VLAN in header_list:
vlan_id = header_list[VLAN].vid
# Event dispatch
if vlan_id in self:
self[vlan_id].packet_in_handler(msg, header_list)
else:
self.logger.debug('Drop unknown vlan packet. [vlan_id=%d]',
vlan_id, extra=self.sw_id)
def _cyclic_update_routing_tbl(self):
while True:
# send ARP to all gateways.
for vlan_router in self.values():
vlan_router.send_arp_all_gw()
hub.sleep(1)
hub.sleep(CHK_ROUTING_TBL_INTERVAL)
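# VlanRouter holds the per-VLAN state of one datapath: the registered
# interface addresses (AddressData), the static routes (RoutingTable), and
# the packets parked while ARP resolution is in flight (SuspendPacketList).
# Router above dispatches REST calls and packet-in events to the VlanRouter
# that matches the packet's VLAN tag.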
class VlanRouter(object):
def __init__(self, vlan_id, dp, port_data, logger):
super(VlanRouter, self).__init__()
self.vlan_id = vlan_id
self.dp = dp
self.sw_id = {'sw_id': dpid_lib.dpid_to_str(dp.id)}
self.logger = logger
self.port_data = port_data
self.address_data = AddressData()
self.routing_tbl = RoutingTable()
self.packet_buffer = SuspendPacketList(self.send_icmp_unreach_error)
self.ofctl = OfCtl.factory(dp, logger)
# Set flow: default route (drop)
self._set_defaultroute_drop()
def delete(self, waiters):
# Delete flow.
msgs = self.ofctl.get_all_flow(waiters)
for msg in msgs:
for stats in msg.body:
vlan_id = VlanRouter._cookie_to_id(REST_VLANID, stats.cookie)
if vlan_id == self.vlan_id:
self.ofctl.delete_flow(stats)
assert len(self.packet_buffer) == 0
@staticmethod
def _cookie_to_id(id_type, cookie):
if id_type == REST_VLANID:
rest_id = cookie >> COOKIE_SHIFT_VLANID
elif id_type == REST_ADDRESSID:
rest_id = cookie & UINT32_MAX
else:
assert id_type == REST_ROUTEID
rest_id = (cookie & UINT32_MAX) >> COOKIE_SHIFT_ROUTEID
return rest_id
def _id_to_cookie(self, id_type, rest_id):
vid = self.vlan_id << COOKIE_SHIFT_VLANID
if id_type == REST_VLANID:
cookie = rest_id << COOKIE_SHIFT_VLANID
elif id_type == REST_ADDRESSID:
cookie = vid + rest_id
else:
assert id_type == REST_ROUTEID
cookie = vid + (rest_id << COOKIE_SHIFT_ROUTEID)
return cookie
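    # Cookie layout sketch (the exact widths come from COOKIE_SHIFT_VLANID
    # and COOKIE_SHIFT_ROUTEID, defined earlier): the VLAN id always occupies
    # the bits above COOKIE_SHIFT_VLANID, an address id sits directly in the
    # low bits, and a route id is shifted up by COOKIE_SHIFT_ROUTEID so the
    # two id spaces cannot collide. _cookie_to_id() above inverts this.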
def _get_priority(self, priority_type, route=None):
return get_priority(priority_type, vid=self.vlan_id, route=route)
def _response(self, msg):
if msg and self.vlan_id:
msg.setdefault(REST_VLANID, self.vlan_id)
return msg
def get_data(self):
address_data = self._get_address_data()
routing_data = self._get_routing_data()
data = {}
if address_data[REST_ADDRESS]:
data.update(address_data)
if routing_data[REST_ROUTE]:
data.update(routing_data)
return self._response(data)
def _get_address_data(self):
address_data = []
for value in self.address_data.values():
default_gw = ip_addr_ntoa(value.default_gw)
address = '%s/%d' % (default_gw, value.netmask)
data = {REST_ADDRESSID: value.address_id,
REST_ADDRESS: address}
address_data.append(data)
return {REST_ADDRESS: address_data}
def _get_routing_data(self):
routing_data = []
for key, value in self.routing_tbl.items():
if value.gateway_mac is not None:
gateway = ip_addr_ntoa(value.gateway_ip)
data = {REST_ROUTEID: value.route_id,
REST_DESTINATION: key,
REST_GATEWAY: gateway}
routing_data.append(data)
return {REST_ROUTE: routing_data}
def set_data(self, data):
details = None
try:
# Set address data
if REST_ADDRESS in data:
address = data[REST_ADDRESS]
address_id = self._set_address_data(address)
details = 'Add address [address_id=%d]' % address_id
# Set routing data
elif REST_GATEWAY in data:
gateway = data[REST_GATEWAY]
if REST_DESTINATION in data:
destination = data[REST_DESTINATION]
else:
destination = DEFAULT_ROUTE
route_id = self._set_routing_data(destination, gateway)
details = 'Add route [route_id=%d]' % route_id
except CommandFailure as err_msg:
msg = {REST_RESULT: REST_NG, REST_DETAILS: str(err_msg)}
return self._response(msg)
if details is not None:
msg = {REST_RESULT: REST_OK, REST_DETAILS: details}
return self._response(msg)
else:
raise ValueError('Invalid parameter.')
def _set_address_data(self, address):
address = self.address_data.add(address)
cookie = self._id_to_cookie(REST_ADDRESSID, address.address_id)
# Set flow: host MAC learning (packet in)
priority = self._get_priority(PRIORITY_MAC_LEARNING)
self.ofctl.set_packetin_flow(cookie, priority,
dl_type=ether.ETH_TYPE_IP,
dl_vlan=self.vlan_id,
dst_ip=address.nw_addr,
dst_mask=address.netmask)
log_msg = 'Set host MAC learning (packet in) flow [cookie=0x%x]'
self.logger.info(log_msg, cookie, extra=self.sw_id)
        # Set flow: IP handling (packet in)
priority = self._get_priority(PRIORITY_IP_HANDLING)
self.ofctl.set_packetin_flow(cookie, priority,
dl_type=ether.ETH_TYPE_IP,
dl_vlan=self.vlan_id,
dst_ip=address.default_gw)
self.logger.info('Set IP handling (packet in) flow [cookie=0x%x]',
cookie, extra=self.sw_id)
# Set flow: L2 switching (normal)
outport = self.ofctl.dp.ofproto.OFPP_NORMAL
priority = self._get_priority(PRIORITY_L2_SWITCHING)
self.ofctl.set_routing_flow(
cookie, priority, outport, dl_vlan=self.vlan_id,
nw_src=address.nw_addr, src_mask=address.netmask,
nw_dst=address.nw_addr, dst_mask=address.netmask)
self.logger.info('Set L2 switching (normal) flow [cookie=0x%x]',
cookie, extra=self.sw_id)
# Send GARP
self.send_arp_request(address.default_gw, address.default_gw)
return address.address_id
def _set_routing_data(self, destination, gateway):
err_msg = 'Invalid [%s] value.' % REST_GATEWAY
dst_ip = ip_addr_aton(gateway, err_msg=err_msg)
address = self.address_data.get_data(ip=dst_ip)
if address is None:
msg = 'Gateway=%s\'s address is not registered.' % gateway
raise CommandFailure(msg=msg)
elif dst_ip == address.default_gw:
msg = 'Gateway=%s is used as default gateway of address_id=%d'\
% (gateway, address.address_id)
raise CommandFailure(msg=msg)
else:
src_ip = address.default_gw
route = self.routing_tbl.add(destination, gateway)
self._set_route_packetin(route)
self.send_arp_request(src_ip, dst_ip)
return route.route_id
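    # Adding a route is a two-step handshake: _set_route_packetin() installs
    # a packet-in flow for the destination and send_arp_request() resolves
    # the gateway MAC; once the ARP reply arrives, _update_routing_tbl()
    # swaps in the real routing flow (MAC rewrite, and TTL decrement where
    # the OpenFlow version supports it).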
def _set_defaultroute_drop(self):
cookie = self._id_to_cookie(REST_VLANID, self.vlan_id)
priority = self._get_priority(PRIORITY_DEFAULT_ROUTING)
outport = None # for drop
self.ofctl.set_routing_flow(cookie, priority, outport,
dl_vlan=self.vlan_id)
self.logger.info('Set default route (drop) flow [cookie=0x%x]',
cookie, extra=self.sw_id)
def _set_route_packetin(self, route):
cookie = self._id_to_cookie(REST_ROUTEID, route.route_id)
priority, log_msg = self._get_priority(PRIORITY_TYPE_ROUTE,
route=route)
self.ofctl.set_packetin_flow(cookie, priority,
dl_type=ether.ETH_TYPE_IP,
dl_vlan=self.vlan_id,
dst_ip=route.dst_ip,
dst_mask=route.netmask)
self.logger.info('Set %s (packet in) flow [cookie=0x%x]', log_msg,
cookie, extra=self.sw_id)
def delete_data(self, data, waiters):
if REST_ROUTEID in data:
route_id = data[REST_ROUTEID]
msg = self._delete_routing_data(route_id, waiters)
elif REST_ADDRESSID in data:
address_id = data[REST_ADDRESSID]
msg = self._delete_address_data(address_id, waiters)
else:
raise ValueError('Invalid parameter.')
return self._response(msg)
def _delete_address_data(self, address_id, waiters):
if address_id != REST_ALL:
try:
address_id = int(address_id)
except ValueError as e:
err_msg = 'Invalid [%s] value. %s'
raise ValueError(err_msg % (REST_ADDRESSID, e.message))
skip_ids = self._chk_addr_relation_route(address_id)
# Get all flow.
delete_list = []
msgs = self.ofctl.get_all_flow(waiters)
max_id = UINT16_MAX
for msg in msgs:
for stats in msg.body:
vlan_id = VlanRouter._cookie_to_id(REST_VLANID, stats.cookie)
if vlan_id != self.vlan_id:
continue
addr_id = VlanRouter._cookie_to_id(REST_ADDRESSID,
stats.cookie)
if addr_id in skip_ids:
continue
elif address_id == REST_ALL:
if addr_id <= COOKIE_DEFAULT_ID or max_id < addr_id:
continue
elif address_id != addr_id:
continue
delete_list.append(stats)
delete_ids = []
for flow_stats in delete_list:
# Delete flow
self.ofctl.delete_flow(flow_stats)
address_id = VlanRouter._cookie_to_id(REST_ADDRESSID,
flow_stats.cookie)
del_address = self.address_data.get_data(addr_id=address_id)
if del_address is not None:
# Clean up suspend packet threads.
self.packet_buffer.delete(del_addr=del_address)
# Delete data.
self.address_data.delete(address_id)
if address_id not in delete_ids:
delete_ids.append(address_id)
msg = {}
if delete_ids:
delete_ids = ','.join(str(addr_id) for addr_id in delete_ids)
details = 'Delete address [address_id=%s]' % delete_ids
msg = {REST_RESULT: REST_OK, REST_DETAILS: details}
if skip_ids:
skip_ids = ','.join(str(addr_id) for addr_id in skip_ids)
details = 'Skip delete (related route exist) [address_id=%s]'\
% skip_ids
if msg:
msg[REST_DETAILS] += ', %s' % details
else:
msg = {REST_RESULT: REST_NG, REST_DETAILS: details}
return msg
def _delete_routing_data(self, route_id, waiters):
if route_id != REST_ALL:
try:
route_id = int(route_id)
except ValueError as e:
err_msg = 'Invalid [%s] value. %s'
raise ValueError(err_msg % (REST_ROUTEID, e.message))
# Get all flow.
msgs = self.ofctl.get_all_flow(waiters)
delete_list = []
for msg in msgs:
for stats in msg.body:
vlan_id = VlanRouter._cookie_to_id(REST_VLANID, stats.cookie)
if vlan_id != self.vlan_id:
continue
rt_id = VlanRouter._cookie_to_id(REST_ROUTEID, stats.cookie)
if route_id == REST_ALL:
if rt_id == COOKIE_DEFAULT_ID:
continue
elif route_id != rt_id:
continue
delete_list.append(stats)
# Delete flow.
delete_ids = []
for flow_stats in delete_list:
self.ofctl.delete_flow(flow_stats)
route_id = VlanRouter._cookie_to_id(REST_ROUTEID,
flow_stats.cookie)
self.routing_tbl.delete(route_id)
if route_id not in delete_ids:
delete_ids.append(route_id)
# case: Default route deleted. -> set flow (drop)
route_type = get_priority_type(flow_stats.priority,
vid=self.vlan_id)
if route_type == PRIORITY_DEFAULT_ROUTING:
self._set_defaultroute_drop()
msg = {}
if delete_ids:
delete_ids = ','.join(str(route_id) for route_id in delete_ids)
details = 'Delete route [route_id=%s]' % delete_ids
msg = {REST_RESULT: REST_OK, REST_DETAILS: details}
return msg
def _chk_addr_relation_route(self, address_id):
# Check exist of related routing data.
relate_list = []
gateways = self.routing_tbl.get_gateways()
for gateway in gateways:
address = self.address_data.get_data(ip=gateway)
if address is not None:
if (address_id == REST_ALL
and address.address_id not in relate_list):
relate_list.append(address.address_id)
elif address.address_id == address_id:
relate_list = [address_id]
break
return relate_list
def packet_in_handler(self, msg, header_list):
# Check invalid TTL (only OpenFlow V1.2)
ofproto = self.dp.ofproto
if ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
if msg.reason == ofproto.OFPR_INVALID_TTL:
self._packetin_invalid_ttl(msg, header_list)
return
# Analyze event type.
if ARP in header_list:
self._packetin_arp(msg, header_list)
return
if IPV4 in header_list:
rt_ports = self.address_data.get_default_gw()
if header_list[IPV4].dst in rt_ports:
# Packet to router's port.
if ICMP in header_list:
if header_list[ICMP].type == icmp.ICMP_ECHO_REQUEST:
self._packetin_icmp_req(msg, header_list)
return
elif TCP in header_list or UDP in header_list:
self._packetin_tcp_udp(msg, header_list)
return
else:
# Packet to internal host or gateway router.
self._packetin_to_node(msg, header_list)
return
def _packetin_arp(self, msg, header_list):
src_addr = self.address_data.get_data(ip=header_list[ARP].src_ip)
if src_addr is None:
return
# case: Receive ARP from the gateway
# Update routing table.
# case: Receive ARP from an internal host
# Learning host MAC.
gw_flg = self._update_routing_tbl(msg, header_list)
if gw_flg is False:
self._learning_host_mac(msg, header_list)
# ARP packet handling.
in_port = self.ofctl.get_packetin_inport(msg)
src_ip = header_list[ARP].src_ip
dst_ip = header_list[ARP].dst_ip
srcip = ip_addr_ntoa(src_ip)
dstip = ip_addr_ntoa(dst_ip)
rt_ports = self.address_data.get_default_gw()
if src_ip == dst_ip:
# GARP -> packet forward (normal)
output = self.ofctl.dp.ofproto.OFPP_NORMAL
self.ofctl.send_packet_out(in_port, output, msg.data)
self.logger.info('Receive GARP from [%s].', srcip,
extra=self.sw_id)
self.logger.info('Send GARP (normal).', extra=self.sw_id)
elif dst_ip not in rt_ports:
dst_addr = self.address_data.get_data(ip=dst_ip)
if (dst_addr is not None and
src_addr.address_id == dst_addr.address_id):
# ARP from internal host -> packet forward (normal)
output = self.ofctl.dp.ofproto.OFPP_NORMAL
self.ofctl.send_packet_out(in_port, output, msg.data)
self.logger.info('Receive ARP from an internal host [%s].',
srcip, extra=self.sw_id)
self.logger.info('Send ARP (normal)', extra=self.sw_id)
else:
if header_list[ARP].opcode == arp.ARP_REQUEST:
# ARP request to router port -> send ARP reply
src_mac = header_list[ARP].src_mac
dst_mac = self.port_data[in_port].mac
arp_target_mac = dst_mac
output = in_port
in_port = self.ofctl.dp.ofproto.OFPP_CONTROLLER
self.ofctl.send_arp(arp.ARP_REPLY, self.vlan_id,
dst_mac, src_mac, dst_ip, src_ip,
arp_target_mac, in_port, output)
log_msg = 'Receive ARP request from [%s] to router port [%s].'
self.logger.info(log_msg, srcip, dstip, extra=self.sw_id)
self.logger.info('Send ARP reply to [%s]', srcip,
extra=self.sw_id)
elif header_list[ARP].opcode == arp.ARP_REPLY:
# ARP reply to router port -> suspend packets forward
log_msg = 'Receive ARP reply from [%s] to router port [%s].'
self.logger.info(log_msg, srcip, dstip, extra=self.sw_id)
packet_list = self.packet_buffer.get_data(src_ip)
if packet_list:
# stop ARP reply wait thread.
for suspend_packet in packet_list:
self.packet_buffer.delete(pkt=suspend_packet)
# send suspend packet.
output = self.ofctl.dp.ofproto.OFPP_TABLE
for suspend_packet in packet_list:
self.ofctl.send_packet_out(suspend_packet.in_port,
output,
suspend_packet.data)
self.logger.info('Send suspend packet to [%s].',
srcip, extra=self.sw_id)
def _packetin_icmp_req(self, msg, header_list):
# Send ICMP echo reply.
in_port = self.ofctl.get_packetin_inport(msg)
self.ofctl.send_icmp(in_port, header_list, self.vlan_id,
icmp.ICMP_ECHO_REPLY,
icmp.ICMP_ECHO_REPLY_CODE,
icmp_data=header_list[ICMP].data)
srcip = ip_addr_ntoa(header_list[IPV4].src)
dstip = ip_addr_ntoa(header_list[IPV4].dst)
log_msg = 'Receive ICMP echo request from [%s] to router port [%s].'
self.logger.info(log_msg, srcip, dstip, extra=self.sw_id)
self.logger.info('Send ICMP echo reply to [%s].', srcip,
extra=self.sw_id)
def _packetin_tcp_udp(self, msg, header_list):
# Send ICMP port unreach error.
in_port = self.ofctl.get_packetin_inport(msg)
self.ofctl.send_icmp(in_port, header_list, self.vlan_id,
icmp.ICMP_DEST_UNREACH,
icmp.ICMP_PORT_UNREACH_CODE,
msg_data=msg.data)
srcip = ip_addr_ntoa(header_list[IPV4].src)
dstip = ip_addr_ntoa(header_list[IPV4].dst)
self.logger.info('Receive TCP/UDP from [%s] to router port [%s].',
srcip, dstip, extra=self.sw_id)
self.logger.info('Send ICMP destination unreachable to [%s].', srcip,
extra=self.sw_id)
def _packetin_to_node(self, msg, header_list):
if len(self.packet_buffer) >= MAX_SUSPENDPACKETS:
self.logger.info('Packet is dropped, MAX_SUSPENDPACKETS exceeded.',
extra=self.sw_id)
return
# Send ARP request to get node MAC address.
in_port = self.ofctl.get_packetin_inport(msg)
src_ip = None
dst_ip = header_list[IPV4].dst
srcip = ip_addr_ntoa(header_list[IPV4].src)
dstip = ip_addr_ntoa(dst_ip)
address = self.address_data.get_data(ip=dst_ip)
if address is not None:
log_msg = 'Receive IP packet from [%s] to an internal host [%s].'
self.logger.info(log_msg, srcip, dstip, extra=self.sw_id)
src_ip = address.default_gw
else:
route = self.routing_tbl.get_data(dst_ip=dst_ip)
if route is not None:
log_msg = 'Receive IP packet from [%s] to [%s].'
self.logger.info(log_msg, srcip, dstip, extra=self.sw_id)
gw_address = self.address_data.get_data(ip=route.gateway_ip)
if gw_address is not None:
src_ip = gw_address.default_gw
dst_ip = route.gateway_ip
if src_ip is not None:
self.packet_buffer.add(in_port, header_list, msg.data)
self.send_arp_request(src_ip, dst_ip, in_port=in_port)
self.logger.info('Send ARP request (flood)', extra=self.sw_id)
def _packetin_invalid_ttl(self, msg, header_list):
# Send ICMP TTL error.
srcip = ip_addr_ntoa(header_list[IPV4].src)
self.logger.info('Receive invalid ttl packet from [%s].', srcip,
extra=self.sw_id)
in_port = self.ofctl.get_packetin_inport(msg)
src_ip = self._get_send_port_ip(header_list)
if src_ip is not None:
self.ofctl.send_icmp(in_port, header_list, self.vlan_id,
icmp.ICMP_TIME_EXCEEDED,
icmp.ICMP_TTL_EXPIRED_CODE,
msg_data=msg.data, src_ip=src_ip)
self.logger.info('Send ICMP time exceeded to [%s].', srcip,
extra=self.sw_id)
def send_arp_all_gw(self):
gateways = self.routing_tbl.get_gateways()
for gateway in gateways:
address = self.address_data.get_data(ip=gateway)
self.send_arp_request(address.default_gw, gateway)
def send_arp_request(self, src_ip, dst_ip, in_port=None):
# Send ARP request from all ports.
for send_port in self.port_data.values():
if in_port is None or in_port != send_port.port_no:
src_mac = send_port.mac
dst_mac = mac_lib.BROADCAST_STR
arp_target_mac = mac_lib.DONTCARE_STR
inport = self.ofctl.dp.ofproto.OFPP_CONTROLLER
output = send_port.port_no
self.ofctl.send_arp(arp.ARP_REQUEST, self.vlan_id,
src_mac, dst_mac, src_ip, dst_ip,
arp_target_mac, inport, output)
def send_icmp_unreach_error(self, packet_buffer):
# Send ICMP host unreach error.
        self.logger.info('ARP reply wait timer timed out.',
                         extra=self.sw_id)
src_ip = self._get_send_port_ip(packet_buffer.header_list)
if src_ip is not None:
self.ofctl.send_icmp(packet_buffer.in_port,
packet_buffer.header_list,
self.vlan_id,
icmp.ICMP_DEST_UNREACH,
icmp.ICMP_HOST_UNREACH_CODE,
msg_data=packet_buffer.data,
src_ip=src_ip)
dstip = ip_addr_ntoa(packet_buffer.dst_ip)
self.logger.info('Send ICMP destination unreachable to [%s].',
dstip, extra=self.sw_id)
def _update_routing_tbl(self, msg, header_list):
# Set flow: routing to gateway.
out_port = self.ofctl.get_packetin_inport(msg)
src_mac = header_list[ARP].src_mac
dst_mac = self.port_data[out_port].mac
src_ip = header_list[ARP].src_ip
gateway_flg = False
for key, value in self.routing_tbl.items():
if value.gateway_ip == src_ip:
gateway_flg = True
if value.gateway_mac == src_mac:
continue
self.routing_tbl[key].gateway_mac = src_mac
cookie = self._id_to_cookie(REST_ROUTEID, value.route_id)
priority, log_msg = self._get_priority(PRIORITY_TYPE_ROUTE,
route=value)
self.ofctl.set_routing_flow(cookie, priority, out_port,
dl_vlan=self.vlan_id,
src_mac=dst_mac,
dst_mac=src_mac,
nw_dst=value.dst_ip,
dst_mask=value.netmask,
dec_ttl=True)
self.logger.info('Set %s flow [cookie=0x%x]', log_msg, cookie,
extra=self.sw_id)
return gateway_flg
def _learning_host_mac(self, msg, header_list):
# Set flow: routing to internal Host.
out_port = self.ofctl.get_packetin_inport(msg)
src_mac = header_list[ARP].src_mac
dst_mac = self.port_data[out_port].mac
src_ip = header_list[ARP].src_ip
gateways = self.routing_tbl.get_gateways()
if src_ip not in gateways:
address = self.address_data.get_data(ip=src_ip)
if address is not None:
cookie = self._id_to_cookie(REST_ADDRESSID, address.address_id)
priority = self._get_priority(PRIORITY_IMPLICIT_ROUTING)
self.ofctl.set_routing_flow(cookie, priority,
out_port, dl_vlan=self.vlan_id,
src_mac=dst_mac, dst_mac=src_mac,
nw_dst=src_ip,
idle_timeout=IDLE_TIMEOUT,
dec_ttl=True)
self.logger.info('Set implicit routing flow [cookie=0x%x]',
cookie, extra=self.sw_id)
def _get_send_port_ip(self, header_list):
try:
src_mac = header_list[ETHERNET].src
if IPV4 in header_list:
src_ip = header_list[IPV4].src
else:
src_ip = header_list[ARP].src_ip
except KeyError:
self.logger.debug('Receive unsupported packet.', extra=self.sw_id)
return None
address = self.address_data.get_data(ip=src_ip)
if address is not None:
return address.default_gw
else:
route = self.routing_tbl.get_data(gw_mac=src_mac)
if route is not None:
address = self.address_data.get_data(ip=route.gateway_ip)
if address is not None:
return address.default_gw
self.logger.debug('Receive packet from unknown IP[%s].',
ip_addr_ntoa(src_ip), extra=self.sw_id)
return None
class PortData(dict):
def __init__(self, ports):
super(PortData, self).__init__()
for port in ports.values():
data = Port(port.port_no, port.hw_addr)
self[port.port_no] = data
class Port(object):
def __init__(self, port_no, hw_addr):
super(Port, self).__init__()
self.port_no = port_no
self.mac = hw_addr
class AddressData(dict):
def __init__(self):
super(AddressData, self).__init__()
self.address_id = 1
def add(self, address):
err_msg = 'Invalid [%s] value.' % REST_ADDRESS
nw_addr, mask, default_gw = nw_addr_aton(address, err_msg=err_msg)
# Check overlaps
for other in self.values():
other_mask = mask_ntob(other.netmask)
add_mask = mask_ntob(mask, err_msg=err_msg)
if (other.nw_addr == ipv4_apply_mask(default_gw, other.netmask) or
nw_addr == ipv4_apply_mask(other.default_gw, mask,
err_msg)):
msg = 'Address overlaps [address_id=%d]' % other.address_id
raise CommandFailure(msg=msg)
address = Address(self.address_id, nw_addr, mask, default_gw)
ip_str = ip_addr_ntoa(nw_addr)
key = '%s/%d' % (ip_str, mask)
self[key] = address
self.address_id += 1
self.address_id &= UINT32_MAX
if self.address_id == COOKIE_DEFAULT_ID:
self.address_id = 1
return address
def delete(self, address_id):
for key, value in self.items():
if value.address_id == address_id:
del self[key]
return
def get_default_gw(self):
return [address.default_gw for address in self.values()]
def get_data(self, addr_id=None, ip=None):
for address in self.values():
if addr_id is not None:
if addr_id == address.address_id:
return address
else:
assert ip is not None
if ipv4_apply_mask(ip, address.netmask) == address.nw_addr:
return address
return None
class Address(object):
def __init__(self, address_id, nw_addr, netmask, default_gw):
super(Address, self).__init__()
self.address_id = address_id
self.nw_addr = nw_addr
self.netmask = netmask
self.default_gw = default_gw
def __contains__(self, ip):
return bool(ipv4_apply_mask(ip, self.netmask) == self.nw_addr)
class RoutingTable(dict):
def __init__(self):
super(RoutingTable, self).__init__()
self.route_id = 1
def add(self, dst_nw_addr, gateway_ip):
err_msg = 'Invalid [%s] value.'
if dst_nw_addr == DEFAULT_ROUTE:
dst_ip = 0
netmask = 0
else:
dst_ip, netmask, dummy = nw_addr_aton(
dst_nw_addr, err_msg=err_msg % REST_DESTINATION)
gateway_ip = ip_addr_aton(gateway_ip, err_msg=err_msg % REST_GATEWAY)
# Check overlaps
overlap_route = None
if dst_nw_addr == DEFAULT_ROUTE:
if DEFAULT_ROUTE in self:
overlap_route = self[DEFAULT_ROUTE].route_id
elif dst_nw_addr in self:
overlap_route = self[dst_nw_addr].route_id
if overlap_route is not None:
msg = 'Destination overlaps [route_id=%d]' % overlap_route
raise CommandFailure(msg=msg)
routing_data = Route(self.route_id, dst_ip, netmask, gateway_ip)
ip_str = ip_addr_ntoa(dst_ip)
key = '%s/%d' % (ip_str, netmask)
self[key] = routing_data
self.route_id += 1
self.route_id &= UINT32_MAX
if self.route_id == COOKIE_DEFAULT_ID:
self.route_id = 1
return routing_data
def delete(self, route_id):
for key, value in self.items():
if value.route_id == route_id:
del self[key]
return
def get_gateways(self):
return [routing_data.gateway_ip for routing_data in self.values()]
def get_data(self, gw_mac=None, dst_ip=None):
if gw_mac is not None:
for route in self.values():
if gw_mac == route.gateway_mac:
return route
return None
elif dst_ip is not None:
get_route = None
mask = 0
for route in self.values():
if ipv4_apply_mask(dst_ip, route.netmask) == route.dst_ip:
# For longest match
if mask < route.netmask:
get_route = route
mask = route.netmask
if get_route is None:
get_route = self.get(DEFAULT_ROUTE, None)
return get_route
else:
return None
class Route(object):
def __init__(self, route_id, dst_ip, netmask, gateway_ip):
super(Route, self).__init__()
self.route_id = route_id
self.dst_ip = dst_ip
self.netmask = netmask
self.gateway_ip = gateway_ip
self.gateway_mac = None
class SuspendPacketList(list):
def __init__(self, timeout_function):
super(SuspendPacketList, self).__init__()
self.timeout_function = timeout_function
def add(self, in_port, header_list, data):
suspend_pkt = SuspendPacket(in_port, header_list, data,
self.wait_arp_reply_timer)
self.append(suspend_pkt)
def delete(self, pkt=None, del_addr=None):
if pkt is not None:
del_list = [pkt]
else:
assert del_addr is not None
del_list = [pkt for pkt in self if pkt.dst_ip in del_addr]
for pkt in del_list:
self.remove(pkt)
hub.kill(pkt.wait_thread)
pkt.wait_thread.wait()
def get_data(self, dst_ip):
return [pkt for pkt in self if pkt.dst_ip == dst_ip]
def wait_arp_reply_timer(self, suspend_pkt):
hub.sleep(ARP_REPLY_TIMER)
if suspend_pkt in self:
self.timeout_function(suspend_pkt)
self.delete(pkt=suspend_pkt)
class SuspendPacket(object):
def __init__(self, in_port, header_list, data, timer):
super(SuspendPacket, self).__init__()
self.in_port = in_port
self.dst_ip = header_list[IPV4].dst
self.header_list = header_list
self.data = data
# Start ARP reply wait timer.
self.wait_thread = hub.spawn(timer, self)
class OfCtl(object):
_OF_VERSIONS = {}
@staticmethod
def register_of_version(version):
def _register_of_version(cls):
OfCtl._OF_VERSIONS.setdefault(version, cls)
return cls
return _register_of_version
@staticmethod
def factory(dp, logger):
of_version = dp.ofproto.OFP_VERSION
if of_version in OfCtl._OF_VERSIONS:
ofctl = OfCtl._OF_VERSIONS[of_version](dp, logger)
else:
raise OFPUnknownVersion(version=of_version)
return ofctl
def __init__(self, dp, logger):
super(OfCtl, self).__init__()
self.dp = dp
self.sw_id = {'sw_id': dpid_lib.dpid_to_str(dp.id)}
self.logger = logger
def set_sw_config_for_ttl(self):
# OpenFlow v1_2 only.
pass
def set_flow(self, cookie, priority, dl_type=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
nw_proto=0, idle_timeout=0, actions=None):
# Abstract method
raise NotImplementedError()
def send_arp(self, arp_opcode, vlan_id, src_mac, dst_mac,
src_ip, dst_ip, arp_target_mac, in_port, output):
# Generate ARP packet
if vlan_id != VLANID_NONE:
ether_proto = ether.ETH_TYPE_8021Q
pcp = 0
cfi = 0
vlan_ether = ether.ETH_TYPE_ARP
v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)
else:
ether_proto = ether.ETH_TYPE_ARP
hwtype = 1
arp_proto = ether.ETH_TYPE_IP
hlen = 6
plen = 4
pkt = packet.Packet()
e = ethernet.ethernet(dst_mac, src_mac, ether_proto)
a = arp.arp(hwtype, arp_proto, hlen, plen, arp_opcode,
src_mac, src_ip, arp_target_mac, dst_ip)
pkt.add_protocol(e)
if vlan_id != VLANID_NONE:
pkt.add_protocol(v)
pkt.add_protocol(a)
pkt.serialize()
# Send packet out
self.send_packet_out(in_port, output, pkt.data, data_str=str(pkt))
def send_icmp(self, in_port, protocol_list, vlan_id, icmp_type,
icmp_code, icmp_data=None, msg_data=None, src_ip=None):
# Generate ICMP reply packet
csum = 0
offset = ethernet.ethernet._MIN_LEN
if vlan_id != VLANID_NONE:
ether_proto = ether.ETH_TYPE_8021Q
pcp = 0
cfi = 0
vlan_ether = ether.ETH_TYPE_IP
v = vlan.vlan(pcp, cfi, vlan_id, vlan_ether)
offset += vlan.vlan._MIN_LEN
else:
ether_proto = ether.ETH_TYPE_IP
eth = protocol_list[ETHERNET]
e = ethernet.ethernet(eth.src, eth.dst, ether_proto)
if icmp_data is None and msg_data is not None:
ip_datagram = msg_data[offset:]
if icmp_type == icmp.ICMP_DEST_UNREACH:
icmp_data = icmp.dest_unreach(data_len=len(ip_datagram),
data=ip_datagram)
elif icmp_type == icmp.ICMP_TIME_EXCEEDED:
icmp_data = icmp.TimeExceeded(data_len=len(ip_datagram),
data=ip_datagram)
ic = icmp.icmp(icmp_type, icmp_code, csum, data=icmp_data)
ip = protocol_list[IPV4]
if src_ip is None:
src_ip = ip.dst
ip_total_length = ip.header_length * 4 + ic._MIN_LEN
if ic.data is not None:
ip_total_length += ic.data._MIN_LEN
if ic.data.data is not None:
                ip_total_length += len(ic.data.data)
i = ipv4.ipv4(ip.version, ip.header_length, ip.tos,
ip_total_length, ip.identification, ip.flags,
ip.offset, DEFAULT_TTL, inet.IPPROTO_ICMP, csum,
src_ip, ip.src)
pkt = packet.Packet()
pkt.add_protocol(e)
if vlan_id != VLANID_NONE:
pkt.add_protocol(v)
pkt.add_protocol(i)
pkt.add_protocol(ic)
pkt.serialize()
# Send packet out
self.send_packet_out(in_port, self.dp.ofproto.OFPP_IN_PORT,
pkt.data, data_str=str(pkt))
def send_packet_out(self, in_port, output, data, data_str=None):
actions = [self.dp.ofproto_parser.OFPActionOutput(output, 0)]
self.dp.send_packet_out(buffer_id=UINT32_MAX, in_port=in_port,
actions=actions, data=data)
#TODO: Packet library convert to string
#if data_str is None:
# data_str = str(packet.Packet(data))
#self.logger.debug('Packet out = %s', data_str, extra=self.sw_id)
def set_normal_flow(self, cookie, priority):
out_port = self.dp.ofproto.OFPP_NORMAL
actions = [self.dp.ofproto_parser.OFPActionOutput(out_port, 0)]
self.set_flow(cookie, priority, actions=actions)
def set_packetin_flow(self, cookie, priority, dl_type=0, dl_dst=0,
dl_vlan=0, dst_ip=0, dst_mask=32, nw_proto=0):
miss_send_len = UINT16_MAX
actions = [self.dp.ofproto_parser.OFPActionOutput(
self.dp.ofproto.OFPP_CONTROLLER, miss_send_len)]
self.set_flow(cookie, priority, dl_type=dl_type, dl_dst=dl_dst,
dl_vlan=dl_vlan, nw_dst=dst_ip, dst_mask=dst_mask,
nw_proto=nw_proto, actions=actions)
def send_stats_request(self, stats, waiters):
self.dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(self.dp.id, {})
event = hub.Event()
msgs = []
waiters_per_dp[stats.xid] = (event, msgs)
self.dp.send_msg(stats)
try:
event.wait(timeout=OFP_REPLY_TIMER)
except hub.Timeout:
del waiters_per_dp[stats.xid]
return msgs
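# send_stats_request() cooperates with RestRouterAPI._stats_reply_handler
# through the shared waiters dict: waiters[dpid][xid] = (event, msgs). The
# requesting greenthread blocks on the event for up to OFP_REPLY_TIMER
# seconds while the handler appends each reply part and fires the event
# once a reply without OFPSF_REPLY_MORE arrives.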
@OfCtl.register_of_version(ofproto_v1_0.OFP_VERSION)
class OfCtl_v1_0(OfCtl):
def __init__(self, dp, logger):
super(OfCtl_v1_0, self).__init__(dp, logger)
def get_packetin_inport(self, msg):
return msg.in_port
def get_all_flow(self, waiters):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
match = ofp_parser.OFPMatch(ofp.OFPFW_ALL, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0)
stats = ofp_parser.OFPFlowStatsRequest(self.dp, 0, match,
0xff, ofp.OFPP_NONE)
return self.send_stats_request(stats, waiters)
def set_flow(self, cookie, priority, dl_type=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
nw_proto=0, idle_timeout=0, actions=None):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_ADD
# Match
wildcards = ofp.OFPFW_ALL
if dl_type:
wildcards &= ~ofp.OFPFW_DL_TYPE
if dl_dst:
wildcards &= ~ofp.OFPFW_DL_DST
if dl_vlan:
wildcards &= ~ofp.OFPFW_DL_VLAN
if nw_src:
v = (32 - src_mask) << ofp.OFPFW_NW_SRC_SHIFT | \
~ofp.OFPFW_NW_SRC_MASK
wildcards &= v
            nw_src = ipv4_text_to_int(nw_src)
if nw_dst:
v = (32 - dst_mask) << ofp.OFPFW_NW_DST_SHIFT | \
~ofp.OFPFW_NW_DST_MASK
wildcards &= v
            nw_dst = ipv4_text_to_int(nw_dst)
if nw_proto:
wildcards &= ~ofp.OFPFW_NW_PROTO
match = ofp_parser.OFPMatch(wildcards, 0, 0, dl_dst, dl_vlan, 0,
dl_type, 0, nw_proto,
nw_src, nw_dst, 0, 0)
actions = actions or []
m = ofp_parser.OFPFlowMod(self.dp, match, cookie, cmd,
idle_timeout=idle_timeout,
priority=priority, actions=actions)
self.dp.send_msg(m)
def set_routing_flow(self, cookie, priority, outport, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_mac=0, dst_mac=0, idle_timeout=0, **dummy):
ofp_parser = self.dp.ofproto_parser
dl_type = ether.ETH_TYPE_IP
# Decrement TTL value is not supported at OpenFlow V1.0
actions = []
if src_mac:
actions.append(ofp_parser.OFPActionSetDlSrc(
mac_lib.haddr_to_bin(src_mac)))
if dst_mac:
actions.append(ofp_parser.OFPActionSetDlDst(
mac_lib.haddr_to_bin(dst_mac)))
if outport is not None:
actions.append(ofp_parser.OFPActionOutput(outport))
self.set_flow(cookie, priority, dl_type=dl_type, dl_vlan=dl_vlan,
nw_src=nw_src, src_mask=src_mask,
nw_dst=nw_dst, dst_mask=dst_mask,
idle_timeout=idle_timeout, actions=actions)
def delete_flow(self, flow_stats):
match = flow_stats.match
cookie = flow_stats.cookie
cmd = self.dp.ofproto.OFPFC_DELETE_STRICT
priority = flow_stats.priority
actions = []
flow_mod = self.dp.ofproto_parser.OFPFlowMod(
self.dp, match, cookie, cmd, priority=priority, actions=actions)
self.dp.send_msg(flow_mod)
self.logger.info('Delete flow [cookie=0x%x]', cookie, extra=self.sw_id)
@OfCtl.register_of_version(ofproto_v1_2.OFP_VERSION)
class OfCtl_v1_2(OfCtl):
def __init__(self, dp, logger):
super(OfCtl_v1_2, self).__init__(dp, logger)
def set_sw_config_for_ttl(self):
flags = self.dp.ofproto.OFPC_INVALID_TTL_TO_CONTROLLER
miss_send_len = UINT16_MAX
m = self.dp.ofproto_parser.OFPSetConfig(self.dp, flags,
miss_send_len)
self.dp.send_msg(m)
self.logger.info('Set SW config for TTL error packet in.',
extra=self.sw_id)
def get_packetin_inport(self, msg):
in_port = self.dp.ofproto.OFPP_ANY
for match_field in msg.match.fields:
if match_field.header == self.dp.ofproto.OXM_OF_IN_PORT:
in_port = match_field.value
break
return in_port
def get_all_flow(self, waiters):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
match = ofp_parser.OFPMatch()
stats = ofp_parser.OFPFlowStatsRequest(self.dp, 0, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, 0, match)
return self.send_stats_request(stats, waiters)
def set_flow(self, cookie, priority, dl_type=0, dl_dst=0, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
nw_proto=0, idle_timeout=0, actions=None):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_ADD
# Match
match = ofp_parser.OFPMatch()
if dl_type:
match.set_dl_type(dl_type)
if dl_dst:
match.set_dl_dst(dl_dst)
if dl_vlan:
match.set_vlan_vid(dl_vlan)
if nw_src:
match.set_ipv4_src_masked(ipv4_text_to_int(nw_src),
mask_ntob(src_mask))
if nw_dst:
match.set_ipv4_dst_masked(ipv4_text_to_int(nw_dst),
mask_ntob(dst_mask))
if nw_proto:
if dl_type == ether.ETH_TYPE_IP:
match.set_ip_proto(nw_proto)
elif dl_type == ether.ETH_TYPE_ARP:
match.set_arp_opcode(nw_proto)
# Instructions
actions = actions or []
inst = [ofp_parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
m = ofp_parser.OFPFlowMod(self.dp, cookie, 0, 0, cmd, idle_timeout,
0, priority, UINT32_MAX, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, match, inst)
self.dp.send_msg(m)
def set_routing_flow(self, cookie, priority, outport, dl_vlan=0,
nw_src=0, src_mask=32, nw_dst=0, dst_mask=32,
src_mac=0, dst_mac=0, idle_timeout=0, dec_ttl=False):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
dl_type = ether.ETH_TYPE_IP
actions = []
if dec_ttl:
actions.append(ofp_parser.OFPActionDecNwTtl())
if src_mac:
set_src = ofp_parser.OFPMatchField.make(ofp.OXM_OF_ETH_SRC,
src_mac)
actions.append(ofp_parser.OFPActionSetField(set_src))
if dst_mac:
set_dst = ofp_parser.OFPMatchField.make(ofp.OXM_OF_ETH_DST,
dst_mac)
actions.append(ofp_parser.OFPActionSetField(set_dst))
if outport is not None:
actions.append(ofp_parser.OFPActionOutput(outport, 0))
self.set_flow(cookie, priority, dl_type=dl_type, dl_vlan=dl_vlan,
nw_src=nw_src, src_mask=src_mask,
nw_dst=nw_dst, dst_mask=dst_mask,
idle_timeout=idle_timeout, actions=actions)
def delete_flow(self, flow_stats):
ofp = self.dp.ofproto
ofp_parser = self.dp.ofproto_parser
cmd = ofp.OFPFC_DELETE
cookie = flow_stats.cookie
cookie_mask = UINT64_MAX
match = ofp_parser.OFPMatch()
inst = []
flow_mod = ofp_parser.OFPFlowMod(self.dp, cookie, cookie_mask, 0, cmd,
0, 0, 0, UINT32_MAX, ofp.OFPP_ANY,
ofp.OFPG_ANY, 0, match, inst)
self.dp.send_msg(flow_mod)
self.logger.info('Delete flow [cookie=0x%x]', cookie, extra=self.sw_id)
def ip_addr_aton(ip_str, err_msg=None):
try:
return addrconv.ipv4.bin_to_text(socket.inet_aton(ip_str))
except (struct.error, socket.error) as e:
if err_msg is not None:
e.message = '%s %s' % (err_msg, e.message)
raise ValueError(e.message)
def ip_addr_ntoa(ip):
return socket.inet_ntoa(addrconv.ipv4.text_to_bin(ip))
def mask_ntob(mask, err_msg=None):
try:
return (UINT32_MAX << (32 - mask)) & UINT32_MAX
except ValueError:
msg = 'illegal netmask'
if err_msg is not None:
msg = '%s %s' % (err_msg, msg)
raise ValueError(msg)
def ipv4_apply_mask(address, prefix_len, err_msg=None):
assert isinstance(address, str)
address_int = ipv4_text_to_int(address)
return ipv4_int_to_text(address_int & mask_ntob(prefix_len, err_msg))
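# Example (illustrative): ipv4_apply_mask('192.168.10.35', 24) clears the
# host bits and returns '192.168.10.0'; with prefix_len=32 the address is
# returned unchanged.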
def ipv4_int_to_text(ip_int):
assert isinstance(ip_int, (int, long))
return addrconv.ipv4.bin_to_text(struct.pack('!I', ip_int))
def ipv4_text_to_int(ip_text):
if ip_text == 0:
return ip_text
assert isinstance(ip_text, str)
return struct.unpack('!I', addrconv.ipv4.text_to_bin(ip_text))[0]
def nw_addr_aton(nw_addr, err_msg=None):
ip_mask = nw_addr.split('/')
default_route = ip_addr_aton(ip_mask[0], err_msg=err_msg)
netmask = 32
if len(ip_mask) == 2:
try:
netmask = int(ip_mask[1])
except ValueError as e:
if err_msg is not None:
e.message = '%s %s' % (err_msg, e.message)
raise ValueError(e.message)
if netmask < 0:
msg = 'illegal netmask'
if err_msg is not None:
msg = '%s %s' % (err_msg, msg)
raise ValueError(msg)
nw_addr = ipv4_apply_mask(default_route, netmask, err_msg)
return nw_addr, netmask, default_route
| citrix-openstack/build-ryu | ryu/app/rest_router.py | Python | apache-2.0 | 68,137 |
import os
import json
import re
from BeautifulSoup import BeautifulSoup
from psrd.rules import write_rules
from psrd.files import char_replace
from psrd.universal import parse_universal, print_struct, StatBlockHeading, StatBlockSection
from psrd.sections import ability_pass, entity_pass, find_section, find_all_sections, add_section, remove_section, cap_words, quote_pass
from psrd.stat_block import stat_block_pass
def druid_animal_companion_fix(section, prev):
tuples = section.keys
keys = []
s = section
ret = True
if s.name == 'Starting Statistics':
s = prev
ret = False
elif s.name in ('4th-Level Advancement', '7th-Level Advancement'):
prev.details.append(s)
ret = False
for tup in tuples:
keys.append(tup)
s.keys = keys
if ret:
return s
def druid_structural_pass(section):
if section.has_key('sections'):
newsections = []
prev = None
for s in section['sections']:
if s.__class__ == StatBlockHeading:
newsection = druid_animal_companion_fix(s, prev)
if newsection:
newsections.append(newsection)
prev = newsection
elif s.__class__ == dict:
newsections.append(druid_structural_pass(s))
else:
newsections.append(s)
section['sections'] = newsections
return section
def structural_pass(struct, filename):
	if filename in ('druid.html',):
struct = druid_structural_pass(struct)
cs = find_section(struct, name="Class Skills", section_type='section')
table = find_section(cs, name=struct['name'], section_type='table')
idx = struct['sections'].index(cs)
	while table:
		idx = idx + 1
		remove_section(struct, table)
		struct['sections'].insert(idx, table)
		table = find_section(cs, name=struct['name'], section_type='table')
return struct
def domain_pass(struct):
d = find_section(struct, name="Domains", section_type='section')
if d:
domains = find_all_sections(struct, name=re.compile('^.*Domain$'), section_type='section')
for domain in domains:
remove_section(struct, domain)
domain['subtype'] = 'cleric_domain'
add_section(d, domain)
return struct
def bloodline_pass(struct):
s = find_section(struct, name="Sorcerer Bloodlines", section_type='section')
if s:
collect = False
bloodlines = []
for section in struct['sections']:
if collect:
bloodlines.append(section)
elif s == section:
collect = True
for bloodline in bloodlines:
bloodline['subtype'] = 'sorcerer_bloodline'
remove_section(struct, bloodline)
add_section(s, bloodline)
return struct
def arcane_school_pass(struct):
s = find_section(struct, name="Arcane Schools", section_type='section')
if s:
collect = False
schools = []
for section in struct['sections']:
if section.get('name') == 'Familiars':
collect = False
elif collect:
schools.append(section)
elif s == section:
collect = True
for school in schools:
school['subtype'] = 'arcane_school'
remove_section(struct, school)
add_section(s, school)
return struct
def mark_subtype_pass(struct, name, section_type, subtype):
s = find_section(struct, name=name, section_type=section_type)
if s:
for section in s.get('sections', []):
section['subtype'] = subtype
return struct
def core_class_pass(struct):
struct['subtype'] = 'core'
return struct
def npc_class_pass(struct):
struct['subtype'] = 'npc'
return struct
def base_class_pass(struct):
struct['subtype'] = 'base'
return struct
def hybrid_class_pass(struct):
struct['subtype'] = 'hybrid'
return struct
def prestige_class_pass(struct):
struct['subtype'] = 'prestige'
return struct
def class_pass(struct):
struct['type'] = 'class'
align = find_section(struct, name="Alignment", section_type='section')
if align:
remove_section(struct, align)
soup = BeautifulSoup(align['text'])
struct['alignment'] = ''.join(soup.findAll(text=True))
hd = find_section(struct, name="Hit Die", section_type='section')
if not hd:
hd = find_section(struct, name="Hit Dice", section_type='section')
if hd:
remove_section(struct, hd)
soup = BeautifulSoup(hd['text'])
hit = ''.join(soup.findAll(text=True))
if hit.endswith("."):
hit = hit[:-1]
struct['hit_dice'] = hit
return struct
def anon_pass(cl):
if not cl.has_key('name'):
sections = cl['sections']
top = sections.pop(0)
top['sections'].extend(sections)
return top
return cl
def ranger_pass(cl):
if cl['name'] == 'Ranger':
cs = find_section(cl, name='Combat Style Feat', section_type='ability')
sectionlist = cs.setdefault('sections', [])
soup = BeautifulSoup(cs['text'])
cs['text'] = unicode(soup.contents[0])
archery = {'type': 'section', 'subtype': 'ranger_combat_style', 'source': cl['source']}
twc = archery.copy()
archery['name'] = 'Archery'
archery['text'] = unicode(soup.contents[1])
sectionlist.append(archery)
twc['name'] = 'Two-Weapon'
twc['text'] = unicode(soup.contents[2])
sectionlist.append(twc)
sectionlist.append({'type': 'section', 'source': cl['source'], 'text': unicode(soup.contents[3])})
return cl
def spell_list_pass(cl):
field_dict = {
"Alchemist": "Alchemist Formulae",
"Inquisitor": "Inquisitor Spells",
"Witch": "Witch Spells",
"Summoner": "Summoner Spells",
"Magus": "Magus Spell List"
}
if cl['name'] in field_dict.keys():
name = field_dict[cl['name']]
sl = find_section(cl, name=name, section_type='section')
sections = sl['sections']
for section in sections:
section['type'] = 'spell_list'
section['class'] = cl['name']
m = re.search('(\d)', section['name'])
section['level'] = int(m.group(0))
soup = BeautifulSoup(section['text'])
text = ''.join(soup.findAll(text=True))
text = text.replace('—', '')
text = text.replace('*', '')
text = text.replace('.', '')
del section['text']
spells = []
section['spells'] = spells
for spell_name in text.split(", "):
spell_name = spell_name.strip()
spell_name = cap_words(spell_name)
spell_name = spell_name.replace(" (Mass)", ", Mass")
spell_name = spell_name.replace(" (Greater)", ", Greater")
spell_name = spell_name.replace(" (Lesser)", ", Lesser")
spell_name = spell_name.replace("Topoison", "To Poison")
spells.append({"name": spell_name})
return cl
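# Illustrative round trip for spell_list_pass() (assumes cap_words() simply
# capitalizes each word): a section named '1st-Level Inquisitor Spells'
# whose text renders to 'bless, cure light wounds*, detect magic' becomes
# {'type': 'spell_list', 'class': 'Inquisitor', 'level': 1, 'spells':
# [{'name': 'Bless'}, {'name': 'Cure Light Wounds'}, {'name': 'Detect
# Magic'}]}.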
def parse_class(cl, book):
cl = stat_block_pass(cl, book)
cl = class_pass(cl)
cl = domain_pass(cl)
cl = bloodline_pass(cl)
cl = arcane_school_pass(cl)
cl = ability_pass(cl)
cl = ranger_pass(cl)
cl = quote_pass(cl)
cl = mark_subtype_pass(cl, "Discovery", "ability", "alchemist_discovery")
cl = mark_subtype_pass(cl, "Rage Powers", "ability", "barbarian_rage_power")
cl = mark_subtype_pass(cl, "Bardic Performance", "section", "bardic_performance")
cl = mark_subtype_pass(cl, "Deeds", "section", "gunslinger_deed")
cl = mark_subtype_pass(cl, "Magus Arcana", "section", "magus_arcana")
cl = mark_subtype_pass(cl, "Ninja Tricks", "section", "ninja_trick")
cl = mark_subtype_pass(cl, "Oracle's Curse", "section", "oracle_curse")
cl = mark_subtype_pass(cl, "Mysteries", "section", "oracle_mystery")
cl = mark_subtype_pass(cl, "Rogue Talents", "section", "rogue_talent")
cl = mark_subtype_pass(cl, "Advanced Talents", "section", "rogue_advanced_talent")
cl = mark_subtype_pass(cl, "1-Point Evolutions", "section", "summoner_evolution_1")
cl = mark_subtype_pass(cl, "2-Point Evolutions", "section", "summoner_evolution_2")
cl = mark_subtype_pass(cl, "3-Point Evolutions", "section", "summoner_evolution_3")
cl = mark_subtype_pass(cl, "4-Point Evolutions", "section", "summoner_evolution_4")
cl = mark_subtype_pass(cl, "Cavalier Orders", "section", "warrior_order")
cl = mark_subtype_pass(cl, "Samurai Orders", "section", "warrior_order")
cl = mark_subtype_pass(cl, "Hex", "section", "witch_hex")
cl = mark_subtype_pass(cl, "Major Hex", "section", "witch_major_hex")
cl = mark_subtype_pass(cl, "Grand Hex", "section", "witch_grand_hex")
cl = mark_subtype_pass(cl, "Patron Spells", "section", "witch_patron")
cl = spell_list_pass(cl)
cl = entity_pass(cl)
return cl
def first_pass(filename, output, book):
struct = parse_universal(filename, output, book)
return struct
def parse_core_classes(filename, output, book):
struct = first_pass(filename, output, book)
struct = structural_pass(struct, os.path.basename(filename))
core_class = parse_class(struct, book)
core_class = core_class_pass(core_class)
write_class(filename, output, book, core_class)
def npc_structure_pass(struct):
if not struct.has_key('name'):
struct['name'] = 'Adept'
new = []
old = [struct]
do_old = False
for section in struct['sections']:
if do_old:
old.append(section)
else:
if section['name'] == 'Aristocrat':
do_old = True
old.append(section)
else:
new.append(section)
struct['sections'] = new
return {'sections': old}
return struct
def parse_npc_classes(filename, output, book):
struct = first_pass(filename, output, book)
struct = npc_structure_pass(struct)
for n_class in struct['sections']:
n_class = parse_class(n_class, book)
n_class = npc_class_pass(n_class)
write_class(filename, output, book, n_class)
def parse_hybrid_classes(filename, output, book):
struct = first_pass(filename, output, book)
struct = anon_pass(struct)
b_class = parse_class(struct, book)
b_class = hybrid_class_pass(b_class)
write_class(filename, output, book, b_class)
def parse_base_classes(filename, output, book):
struct = first_pass(filename, output, book)
struct = anon_pass(struct)
b_class = parse_class(struct, book)
b_class = base_class_pass(b_class)
write_class(filename, output, book, b_class)
def parse_prestige_classes(filename, output, book):
struct = first_pass(filename, output, book)
struct = anon_pass(struct)
p_class = parse_class(struct, book)
p_class = prestige_class_pass(p_class)
write_class(filename, output, book, p_class)
def write_class(filename, output, book, cl):
print "%s: %s" %(cl['source'], cl['name'])
filename = create_class_filename(output, book, cl)
fp = open(filename, 'w')
json.dump(cl, fp, indent=4)
fp.close()
def create_class_filename(output, book, cl):
title = char_replace(book) + "/classes/" + char_replace(cl['name'])
return os.path.abspath(output + "/" + title + ".json")
| devonjones/PSRD-Parser | src/psrd/classes.py | Python | gpl-3.0 | 10,202 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Localization based on UNO from UHF/UKS check files
#
from functools import reduce
import numpy
import scipy.linalg
import h5py
from pyscf import tools,gto,scf,dft
from pyscf.tools import molden
import pmloc
import ulocal
def sqrtm(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v*numpy.sqrt(e), v.T.conj())
def lowdin(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v/numpy.sqrt(e), v.T.conj())
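# Both helpers come from the eigendecomposition S = V e V^T of the symmetric
# positive-definite overlap matrix: sqrtm() returns S^{1/2} = V e^{1/2} V^T
# and lowdin() returns S^{-1/2} = V e^{-1/2} V^T, the symmetric (Lowdin)
# orthogonalization matrix used below to move between the AO basis and an
# orthonormal one.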
def dumpLUNO(fname,thresh=0.01):
chkfile = fname+'.chk'
outfile = fname+'_cmo.molden'
tools.molden.from_chkfile(outfile, chkfile)
#=============================
# Natural orbitals
   # Lowdin basis X = S^{-1/2}
   # psi = chi * C
   #     = chi' * C'
   #     = chi * X * (X^{-1} C')
#=============================
mol,mf = scf.chkfile.load_scf(chkfile)
mo_coeff = mf["mo_coeff"]
ova=mol.intor_symmetric("cint1e_ovlp_sph")
nb = mo_coeff.shape[1]
# Check overlap
diff = reduce(numpy.dot,(mo_coeff[0].T,ova,mo_coeff[0])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
diff = reduce(numpy.dot,(mo_coeff[1].T,ova,mo_coeff[1])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
# UHF-alpha/beta
ma = mo_coeff[0]
mb = mo_coeff[1]
nalpha = (mol.nelectron+mol.spin)/2
nbeta = (mol.nelectron-mol.spin)/2
# Spin-averaged DM
pTa = numpy.dot(ma[:,:nalpha],ma[:,:nalpha].T)
pTb = numpy.dot(mb[:,:nbeta],mb[:,:nbeta].T)
pT = 0.5*(pTa+pTb)
# Lowdin basis
s12 = sqrtm(ova)
s12inv = lowdin(ova)
pTOAO = reduce(numpy.dot,(s12,pT,s12))
eig,coeff = scipy.linalg.eigh(-pTOAO)
eig = -2.0*eig
eig[eig<0.0]=0.0
eig[abs(eig)<1.e-14]=0.0
ifplot = False #True
if ifplot:
import matplotlib.pyplot as plt
plt.plot(range(nb),eig,'ro')
plt.show()
# Back to AO basis
coeff = numpy.dot(s12inv,coeff)
diff = reduce(numpy.dot,(coeff.T,ova,coeff)) - numpy.identity(nb)
print 'CtSC-I',numpy.linalg.norm(diff)
#
# Averaged Fock
#
enorb = mf["mo_energy"]
fa = reduce(numpy.dot,(ma,numpy.diag(enorb[0]),ma.T))
fb = reduce(numpy.dot,(mb,numpy.diag(enorb[1]),mb.T))
# Non-orthogonal cases: FC=SCE
   # Fao = S*C*e*C^{-1} = S*C*e*Ct*S
fav = 0.5*(fa+fb)
# Expectation value of natural orbitals <i|F|i>
fexpt = reduce(numpy.dot,(coeff.T,ova,fav,ova,coeff))
enorb = numpy.diag(fexpt)
nocc = eig.copy()
#
# Reordering and define active space according to thresh
#
idx = 0
active=[]
for i in range(nb):
if nocc[i]<=2.0-thresh and nocc[i]>=thresh:
active.append(True)
else:
active.append(False)
print '\nNatural orbitals:'
for i in range(nb):
print 'orb:',i,active[i],nocc[i],enorb[i]
active = numpy.array(active)
actIndices = list(numpy.argwhere(active==True).flatten())
cOrbs = coeff[:,:actIndices[0]]
aOrbs = coeff[:,actIndices]
vOrbs = coeff[:,actIndices[-1]+1:]
nb = cOrbs.shape[0]
nc = cOrbs.shape[1]
na = aOrbs.shape[1]
nv = vOrbs.shape[1]
print 'core orbs:',cOrbs.shape
print 'act orbs:',aOrbs.shape
print 'vir orbs:',vOrbs.shape
assert nc+na+nv == nb
# dump UNO
with open(fname+'_uno.molden','w') as thefile:
molden.header(mol,thefile)
molden.orbital_coeff(mol,thefile,coeff)
#=====================
# Population analysis
#=====================
from pyscf import lo
aux = lo.orth_ao(mol,method='meta_lowdin')
#clmo = ulocal.scdm(cOrbs,ova,aux)
#almo = ulocal.scdm(aOrbs,ova,aux)
clmo = cOrbs
almo = aOrbs
ierr,uc = pmloc.loc(mol,clmo)
ierr,ua = pmloc.loc(mol,almo)
clmo = clmo.dot(uc)
almo = almo.dot(ua)
vlmo = ulocal.scdm(vOrbs,ova,aux)
# P-SORT
mo_c,n_c,e_c = ulocal.psort(ova,fav,pT,clmo)
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
mo_v,n_v,e_v = ulocal.psort(ova,fav,pT,vlmo)
lmo = numpy.hstack((mo_c,mo_o,mo_v)).copy()
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
# CHECK
diff = reduce(numpy.dot,(lmo.T,ova,lmo)) - numpy.identity(nb)
print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname,lmo)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
return mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ
def dumpAct(fname,info,actlst,base=1):
actlst2 = [i-base for i in actlst]
mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ = info
corb = set(range(nc))
aorb = set(range(nc,nc+na))
vorb = set(range(nc+na,nc+na+nv))
print '[dumpAct]'
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
sorb = set(actlst2)
rcorb = corb.difference(corb.intersection(sorb))
#assuming act in actlst
#raorb = aorb.difference(aorb.intersection(sorb))
rvorb = vorb.difference(vorb.intersection(sorb))
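    # Illustration: with nc=2, na=2, nv=2 and actlst2=[1,2,3] (covering all of
    # aorb, as the commented assumption above requires), rcorb={0} and
    # rvorb={4,5}: orbital 1 is pulled out of the core into the active space.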
corb = list(rcorb)
aorb = list(sorb)
vorb = list(rvorb)
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
clmo = lmo[:,corb].copy()
almo = lmo[:,aorb].copy()
vlmo = lmo[:,vorb].copy()
ierr,ua = pmloc.loc(mol,almo)
almo = almo.dot(ua)
#>>> DUMP <<<#
# P-SORT
mo_c = clmo
mo_v = vlmo
e_c = enorb[corb].copy()
e_v = enorb[vorb].copy()
n_c = occ[corb].copy()
n_v = occ[vorb].copy()
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
lmo2 = numpy.hstack((mo_c,mo_o,mo_v))
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
assert len(enorb)==nb
assert len(occ)==nb
# CHECK
diff = reduce(numpy.dot,(lmo2.T,ova,lmo2)) - numpy.identity(nb)
print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname+'_new',lmo2)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
print 'diff(LMO2-LMO)=',numpy.linalg.norm(lmo2-lmo)
nc = len(e_c)
na = len(e_o)
nv = len(e_v)
assert na == len(actlst)
assert nc+na+nv == nb
print 'nc,na,nv,nb=',nc,na,nv,nb
return lmo2,nc,na,nv
if __name__ == '__main__':
fname = 'hs_bp86'
info = dumpLUNO(fname)
actlst = [117,118,119,120,125,126]+range(127,137)
dumpAct(fname,info,actlst,base=1)
| sunqm/pyscf | examples/local_orb/nlocal.py | Python | apache-2.0 | 6,720 |
import pytest
def pytest_runtest_setup(item):
if 'notfixed' in item.keywords:
pytest.skip("Skipping tests that are not fixed yet.")
| elkeschaper/tral | tral/conftest.py | Python | gpl-2.0 | 146 |
# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import email
import email.errors
import imp
import os
import re
import sysconfig
import tempfile
import textwrap
import fixtures
import mock
import pkg_resources
import six
import testtools
from testtools import matchers
import virtualenv
import wheel.install
from pbr import git
from pbr import packaging
from pbr.tests import base
PBR_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
class TestRepo(fixtures.Fixture):
"""A git repo for testing with.
    Use of TempHomeDir with this fixture is strongly recommended: due to the
    lack of 'git config --local' in older gits, the fixture would otherwise
    write to the user's global configuration.
"""
def __init__(self, basedir):
super(TestRepo, self).__init__()
self._basedir = basedir
def setUp(self):
super(TestRepo, self).setUp()
base._run_cmd(['git', 'init', '.'], self._basedir)
base._config_git()
base._run_cmd(['git', 'add', '.'], self._basedir)
def commit(self, message_content='test commit'):
files = len(os.listdir(self._basedir))
path = self._basedir + '/%d' % files
open(path, 'wt').close()
base._run_cmd(['git', 'add', path], self._basedir)
base._run_cmd(['git', 'commit', '-m', message_content], self._basedir)
def uncommit(self):
base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir)
def tag(self, version):
base._run_cmd(
['git', 'tag', '-sm', 'test tag', version], self._basedir)
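# Usage sketch (illustrative; this mirrors how TestVersions below drives the
# fixture):
#
#   repo = self.useFixture(TestRepo(self.package_dir))
#   repo.commit()
#   repo.tag('1.2.3')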
class GPGKeyFixture(fixtures.Fixture):
"""Creates a GPG key for testing.
It's recommended that this be used in concert with a unique home
directory.
"""
def setUp(self):
super(GPGKeyFixture, self).setUp()
tempdir = self.useFixture(fixtures.TempDir())
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path)
for line in gnupg_version[0].split('\n'):
gnupg_version = gnupg_version_re.match(line)
if gnupg_version:
gnupg_version = (int(gnupg_version.group(1)),
int(gnupg_version.group(2)),
int(gnupg_version.group(3)))
break
else:
if gnupg_version is None:
gnupg_version = (0, 0, 0)
config_file = tempdir.path + '/key-config'
f = open(config_file, 'wt')
try:
if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
f.write("""
%no-protection
%transient-key
""")
f.write("""
%no-ask-passphrase
Key-Type: RSA
Name-Real: Example Key
Name-Comment: N/A
Name-Email: [email protected]
Expire-Date: 2d
Preferences: (setpref)
%commit
""")
finally:
f.close()
# Note that --quick-random (--debug-quick-random in GnuPG 2.x)
# does not have a corresponding preferences file setting and
# must be passed explicitly on the command line instead
if gnupg_version[0] == 1:
gnupg_random = '--quick-random'
elif gnupg_version[0] >= 2:
gnupg_random = '--debug-quick-random'
else:
gnupg_random = ''
base._run_cmd(
['gpg', '--gen-key', '--batch', gnupg_random, config_file],
tempdir.path)
class Venv(fixtures.Fixture):
"""Create a virtual environment for testing with.
:attr path: The path to the environment root.
:attr python: The path to the python binary in the environment.
"""
def __init__(self, reason, modules=(), pip_cmd=None):
"""Create a Venv fixture.
:param reason: A human readable string to bake into the venv
file path to aid diagnostics in the case of failures.
:param modules: A list of modules to install, defaults to latest
pip, wheel, and the working copy of PBR.
:attr pip_cmd: A list to override the default pip_cmd passed to
python for installing base packages.
"""
self._reason = reason
if modules == ():
pbr = 'file://%s#egg=pbr' % PBR_ROOT
modules = ['pip', 'wheel', pbr]
self.modules = modules
if pip_cmd is None:
self.pip_cmd = ['-m', 'pip', 'install']
else:
self.pip_cmd = pip_cmd
def _setUp(self):
path = self.useFixture(fixtures.TempDir()).path
virtualenv.create_environment(path, clear=True)
python = os.path.join(path, 'bin', 'python')
command = [python] + self.pip_cmd + ['-U']
if self.modules and len(self.modules) > 0:
command.extend(self.modules)
self.useFixture(base.CapturedSubprocess(
'mkvenv-' + self._reason, command))
self.addCleanup(delattr, self, 'path')
self.addCleanup(delattr, self, 'python')
self.path = path
self.python = python
return path, python
class CreatePackages(fixtures.Fixture):
"""Creates packages from dict with defaults
:param package_dirs: A dict of package name to directory strings
{'pkg_a': '/tmp/path/to/tmp/pkg_a', 'pkg_b': '/tmp/path/to/tmp/pkg_b'}
"""
defaults = {
'setup.py': textwrap.dedent(six.u("""\
#!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
""")),
'setup.cfg': textwrap.dedent(six.u("""\
[metadata]
name = {pkg_name}
"""))
}
def __init__(self, packages):
"""Creates packages from dict with defaults
:param packages: a dict where the keys are the package name and a
value that is a second dict that may be empty, containing keys of
filenames and a string value of the contents.
{'package-a': {'requirements.txt': 'string', 'setup.cfg': 'string'}
"""
self.packages = packages
def _writeFile(self, directory, file_name, contents):
path = os.path.abspath(os.path.join(directory, file_name))
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
if path_dir.startswith(directory):
os.makedirs(path_dir)
else:
raise ValueError
with open(path, 'wt') as f:
f.write(contents)
def _setUp(self):
tmpdir = self.useFixture(fixtures.TempDir()).path
package_dirs = {}
for pkg_name in self.packages:
pkg_path = os.path.join(tmpdir, pkg_name)
package_dirs[pkg_name] = pkg_path
os.mkdir(pkg_path)
for cf in ['setup.py', 'setup.cfg']:
if cf in self.packages[pkg_name]:
contents = self.packages[pkg_name].pop(cf)
else:
contents = self.defaults[cf].format(pkg_name=pkg_name)
self._writeFile(pkg_path, cf, contents)
for cf in self.packages[pkg_name]:
self._writeFile(pkg_path, cf, self.packages[pkg_name][cf])
self.useFixture(TestRepo(pkg_path)).commit()
self.addCleanup(delattr, self, 'package_dirs')
self.package_dirs = package_dirs
return package_dirs
class TestPackagingInGitRepoWithCommit(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestPackagingInGitRepoWithCommit, self).setUp()
repo = self.useFixture(TestRepo(self.package_dir))
repo.commit()
def test_authors(self):
self.run_setup('sdist', allow_fail=False)
# One commit, something should be in the authors list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertNotEqual(body, '')
def test_changelog(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
# One commit, something should be in the ChangeLog list
self.assertNotEqual(body, '')
def test_manifest_exclude_honoured(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(
self.package_dir,
'pbr_testpackage.egg-info/SOURCES.txt'), 'r') as f:
body = f.read()
self.assertThat(
body, matchers.Not(matchers.Contains('pbr_testpackage/extra.py')))
self.assertThat(body, matchers.Contains('pbr_testpackage/__init__.py'))
def test_install_writes_changelog(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(stdout, matchers.Contains('Generating ChangeLog'))
class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase):
def setUp(self):
super(TestPackagingInGitRepoWithoutCommit, self).setUp()
self.useFixture(TestRepo(self.package_dir))
self.run_setup('sdist', allow_fail=False)
def test_authors(self):
# No commits, no authors in list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertEqual(body, '\n')
def test_changelog(self):
# No commits, nothing should be in the ChangeLog list
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertEqual(body, 'CHANGES\n=======\n\n')
class TestPackagingWheels(base.BaseTestCase):
def setUp(self):
super(TestPackagingWheels, self).setUp()
self.useFixture(TestRepo(self.package_dir))
# Build the wheel
self.run_setup('bdist_wheel', allow_fail=False)
# Slowly construct the path to the generated whl
dist_dir = os.path.join(self.package_dir, 'dist')
relative_wheel_filename = os.listdir(dist_dir)[0]
absolute_wheel_filename = os.path.join(
dist_dir, relative_wheel_filename)
wheel_file = wheel.install.WheelFile(absolute_wheel_filename)
wheel_name = wheel_file.parsed_filename.group('namever')
# Create a directory path to unpack the wheel to
self.extracted_wheel_dir = os.path.join(dist_dir, wheel_name)
# Extract the wheel contents to the directory we just created
wheel_file.zipfile.extractall(self.extracted_wheel_dir)
wheel_file.zipfile.close()
def test_data_directory_has_wsgi_scripts(self):
# Build the path to the scripts directory
scripts_dir = os.path.join(
self.extracted_wheel_dir, 'pbr_testpackage-0.0.data/scripts')
self.assertTrue(os.path.exists(scripts_dir))
scripts = os.listdir(scripts_dir)
self.assertIn('pbr_test_wsgi', scripts)
self.assertIn('pbr_test_wsgi_with_class', scripts)
self.assertNotIn('pbr_test_cmd', scripts)
self.assertNotIn('pbr_test_cmd_with_class', scripts)
def test_generates_c_extensions(self):
built_package_dir = os.path.join(
self.extracted_wheel_dir, 'pbr_testpackage')
static_object_filename = 'testext.so'
soabi = get_soabi()
if soabi:
static_object_filename = 'testext.{0}.so'.format(soabi)
static_object_path = os.path.join(
built_package_dir, static_object_filename)
self.assertTrue(os.path.exists(built_package_dir))
self.assertTrue(os.path.exists(static_object_path))
class TestPackagingHelpers(testtools.TestCase):
def test_generate_script(self):
group = 'console_scripts'
entry_point = pkg_resources.EntryPoint(
name='test-ep',
module_name='pbr.packaging',
attrs=('LocalInstallScripts',))
header = '#!/usr/bin/env fake-header\n'
template = ('%(group)s %(module_name)s %(import_target)s '
'%(invoke_target)s')
generated_script = packaging.generate_script(
group, entry_point, header, template)
expected_script = (
'#!/usr/bin/env fake-header\nconsole_scripts pbr.packaging '
'LocalInstallScripts LocalInstallScripts'
)
self.assertEqual(expected_script, generated_script)
def test_generate_script_validates_expectations(self):
group = 'console_scripts'
entry_point = pkg_resources.EntryPoint(
name='test-ep',
module_name='pbr.packaging')
header = '#!/usr/bin/env fake-header\n'
template = ('%(group)s %(module_name)s %(import_target)s '
'%(invoke_target)s')
self.assertRaises(
ValueError, packaging.generate_script, group, entry_point, header,
template)
entry_point = pkg_resources.EntryPoint(
name='test-ep',
module_name='pbr.packaging',
attrs=('attr1', 'attr2', 'attr3'))
self.assertRaises(
ValueError, packaging.generate_script, group, entry_point, header,
template)
class TestPackagingInPlainDirectory(base.BaseTestCase):
def setUp(self):
super(TestPackagingInPlainDirectory, self).setUp()
def test_authors(self):
self.run_setup('sdist', allow_fail=False)
# Not a git repo, no AUTHORS file created
filename = os.path.join(self.package_dir, 'AUTHORS')
self.assertFalse(os.path.exists(filename))
def test_changelog(self):
self.run_setup('sdist', allow_fail=False)
# Not a git repo, no ChangeLog created
filename = os.path.join(self.package_dir, 'ChangeLog')
self.assertFalse(os.path.exists(filename))
def test_install_no_ChangeLog(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(
stdout, matchers.Not(matchers.Contains('Generating ChangeLog')))
class TestPresenceOfGit(base.BaseTestCase):
def testGitIsInstalled(self):
with mock.patch.object(git,
'_run_shell_command') as _command:
_command.return_value = 'git version 1.8.4.1'
self.assertEqual(True, git._git_is_installed())
def testGitIsNotInstalled(self):
with mock.patch.object(git,
'_run_shell_command') as _command:
_command.side_effect = OSError
self.assertEqual(False, git._git_is_installed())
class TestNestedRequirements(base.BaseTestCase):
def test_nested_requirement(self):
tempdir = tempfile.mkdtemp()
requirements = os.path.join(tempdir, 'requirements.txt')
nested = os.path.join(tempdir, 'nested.txt')
with open(requirements, 'w') as f:
f.write('-r ' + nested)
with open(nested, 'w') as f:
f.write('pbr')
result = packaging.parse_requirements([requirements])
self.assertEqual(result, ['pbr'])
class TestVersions(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestVersions, self).setUp()
self.repo = self.useFixture(TestRepo(self.package_dir))
self.useFixture(GPGKeyFixture())
self.useFixture(base.DiveDir(self.package_dir))
def test_email_parsing_errors_are_handled(self):
mocked_open = mock.mock_open()
with mock.patch('pbr.packaging.open', mocked_open):
with mock.patch('email.message_from_file') as message_from_file:
message_from_file.side_effect = [
email.errors.MessageError('Test'),
{'Name': 'pbr_testpackage'}]
version = packaging._get_version_from_pkg_metadata(
'pbr_testpackage')
self.assertTrue(message_from_file.called)
self.assertIsNone(version)
def test_capitalized_headers(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-Ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_capitalized_headers_partial(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_tagged_version_has_tag_version(self):
self.repo.commit()
self.repo.tag('1.2.3')
version = packaging._get_version_from_git('1.2.3')
self.assertEqual('1.2.3', version)
def test_non_canonical_tagged_version_bump(self):
self.repo.commit()
self.repo.tag('1.4')
self.repo.commit('Sem-Ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_untagged_version_has_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
def test_untagged_pre_release_has_pre_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_untagged_version_minor_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: deprecation')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.3.0.dev1'))
def test_untagged_version_major_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_untagged_version_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_pre_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_rc_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.3')
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_preversion_too_low_simple(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
# Note that we can't target 1.2.3 anymore - with 1.2.3 released we
# need to be working on 1.2.4.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.3')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_preversion_too_low_semver_headers(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: feature')
# Note that we can't target 1.2.4, the feature header means we need
# to be working on 1.3.0 or above.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.4')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_get_kwargs_corner_cases(self):
# No tags:
git_dir = self.repo._basedir + '/.git'
get_kwargs = lambda tag: packaging._get_increment_kwargs(git_dir, tag)
def _check_combinations(tag):
self.repo.commit()
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: bugfix')
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: feature')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: api-break')
self.assertEqual(dict(major=True), get_kwargs(tag))
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(major=True, minor=True), get_kwargs(tag))
_check_combinations('')
self.repo.tag('1.2.3')
_check_combinations('1.2.3')
def test_invalid_tag_ignored(self):
# Fix for bug 1356784 - we treated any tag as a version, not just those
# that are valid versions.
self.repo.commit()
self.repo.tag('1')
self.repo.commit()
# when the tree is tagged and its wrong:
self.repo.tag('badver')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev1'))
# When the tree isn't tagged, we also fall through.
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev2'))
# We don't fall through x.y versions
self.repo.commit()
self.repo.tag('1.2')
self.repo.commit()
self.repo.tag('badver2')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.1.dev1'))
# Or x.y.z versions
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
self.repo.tag('badver3')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
# Or alpha/beta/pre versions
self.repo.commit()
self.repo.tag('1.2.4.0a1')
self.repo.commit()
self.repo.tag('badver4')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.0a2.dev1'))
# Non-release related tags are ignored.
self.repo.commit()
self.repo.tag('2')
self.repo.commit()
self.repo.tag('non-release-tag/2014.12.16-1')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.1.dev1'))
def test_valid_tag_honoured(self):
# Fix for bug 1370608 - we converted any target into a 'dev version'
# even if there was a distance of 0 - indicating that we were on the
# tag itself.
self.repo.commit()
self.repo.tag('1.3.0.0a1')
version = packaging._get_version_from_git()
self.assertEqual('1.3.0.0a1', version)
def test_skip_write_git_changelog(self):
# Fix for bug 1467440
self.repo.commit()
self.repo.tag('1.2.3')
os.environ['SKIP_WRITE_GIT_CHANGELOG'] = '1'
version = packaging._get_version_from_git('1.2.3')
self.assertEqual('1.2.3', version)
def tearDown(self):
super(TestVersions, self).tearDown()
os.environ.pop('SKIP_WRITE_GIT_CHANGELOG', None)
class TestRequirementParsing(base.BaseTestCase):
def test_requirement_parsing(self):
pkgs = {
'test_reqparse':
{
'requirements.txt': textwrap.dedent("""\
bar
quux<1.0; python_version=='2.6'
requests-aws>=0.1.4 # BSD License (3 clause)
Routes>=1.12.3,!=2.0,!=2.1;python_version=='2.7'
requests-kerberos>=0.6;python_version=='2.7' # MIT
"""),
'setup.cfg': textwrap.dedent("""\
[metadata]
name = test_reqparse
[extras]
test =
foo
baz>3.2 :python_version=='2.7' # MIT
bar>3.3 :python_version=='2.7' # MIT # Apache
""")},
}
pkg_dirs = self.useFixture(CreatePackages(pkgs)).package_dirs
pkg_dir = pkg_dirs['test_reqparse']
# pkg_resources.split_sections uses None as the title of an
# anonymous section instead of the empty string. Weird.
expected_requirements = {
None: ['bar', 'requests-aws>=0.1.4'],
":(python_version=='2.6')": ['quux<1.0'],
":(python_version=='2.7')": ['Routes>=1.12.3,!=2.0,!=2.1',
'requests-kerberos>=0.6'],
'test': ['foo'],
"test:(python_version=='2.7')": ['baz>3.2', 'bar>3.3']
}
venv = self.useFixture(Venv('reqParse'))
bin_python = venv.python
# Two things are tested by this
        # 1) pbr properly parses markers from requirements.txt and setup.cfg
# 2) bdist_wheel causes pbr to not evaluate markers
self._run_cmd(bin_python, ('setup.py', 'bdist_wheel'),
allow_fail=False, cwd=pkg_dir)
egg_info = os.path.join(pkg_dir, 'test_reqparse.egg-info')
requires_txt = os.path.join(egg_info, 'requires.txt')
with open(requires_txt, 'rt') as requires:
generated_requirements = dict(
pkg_resources.split_sections(requires))
self.assertEqual(expected_requirements, generated_requirements)
def get_soabi():
soabi = None
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError:
pass
if soabi is None and 'pypy' in sysconfig.get_scheme_names():
# NOTE(sigmavirus24): PyPy only added support for the SOABI config var
# to sysconfig in 2015. That was well after 2.2.1 was published in the
# Ubuntu 14.04 archive.
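        # Illustration (hypothetical entry): a suffix tuple such as
        # ('.pypy-41.so', 'rb', 3) from imp.get_suffixes() yields
        # soabi = 'pypy-41' via the split below.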
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.pypy') and suffix.endswith('.so'):
soabi = suffix.split('.')[1]
break
return soabi
| aasoliz/Bitcoin-Statistics | venv/lib/python2.7/site-packages/pbr/tests/test_packaging.py | Python | bsd-3-clause | 28,849 |
import wave
from frostsynth import get_srate, timeslice, interlace
from frostsynth.dump import iter_dumps
def save(source, filename, duration=None):
f = wave.open(filename,"wb")
f.setnchannels(1)
f.setsampwidth(2)
f.setframerate(int(get_srate()))
try:
if duration is None:
f.writeframes(iter_dumps(source, f.getsampwidth()))
else:
f.writeframes(iter_dumps(timeslice(source, duration), f.getsampwidth()))
finally:
f.close()
def stereo_save(left, right, filename):
source = interlace(left, right)
f = wave.open(filename,"wb")
f.setnchannels(2)
f.setsampwidth(2)
f.setframerate(int(get_srate()))
try:
f.writeframes(iter_dumps(source, f.getsampwidth()))
finally:
f.close()
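# Usage sketch (illustrative; the float-sample convention is an assumption
# about iter_dumps, not documented here). A hypothetical one-second 440 Hz
# mono tone at the current sample rate:
#
#   import math
#   n = int(get_srate())
#   tone = (math.sin(2 * math.pi * 440 * i / n) for i in range(n))
#   save(tone, "tone.wav")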
| frostburn/frostsynth | frostsynth/waveout.py | Python | mit | 789 |
#!/usr/bin/env python
#
# An example on how to read the YAML output from etisnoop
# Pipe etisnoop to this script
#
# License: public domain
import sys
import yaml
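# Implied input shape (reconstructed from the key accesses below; values are
# illustrative):
#
#   ---
#   Frame: 0
#   LIDATA:
#     FIC:
#       - FIGs:
#           - FIG: '0/0 ...'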
for frame in yaml.load_all(sys.stdin):
print("FIGs in frame {}".format(frame['Frame']))
for fib in frame['LIDATA']['FIC']:
if fib['FIGs']:
for fig in fib['FIGs']:
print(" FIG " + fig['FIG'])
| Opendigitalradio/etisnoop | yamlexample.py | Python | gpl-3.0 | 401 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ogrinfo.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import string
import re
try:
from osgeo import ogr
ogrAvailable = True
except:
ogrAvailable = False
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from processing.parameters.ParameterVector import ParameterVector
from processing.outputs.OutputHTML import OutputHTML
from OgrAlgorithm import OgrAlgorithm
class OgrInfo(OgrAlgorithm):
OUTPUT = 'OUTPUT'
INPUT_LAYER = 'INPUT_LAYER'
def defineCharacteristics(self):
self.name = 'Information'
self.group = '[OGR] Miscellaneous'
self.addParameter(ParameterVector(self.INPUT_LAYER, 'Input layer',
[ParameterVector.VECTOR_TYPE_ANY], False))
self.addOutput(OutputHTML(self.OUTPUT, 'Layer information'))
def commandLineName(self):
return "gdalogr:vectorinfo"
def processAlgorithm(self, progress):
input = self.getParameterValue(self.INPUT_LAYER)
ogrLayer = self.ogrConnectionString(input)
output = self.getOutputValue(self.OUTPUT)
self.ogrinfo(ogrLayer)
f = open(output, 'w')
f.write('<pre>' + self.info + '</pre>')
f.close()
def out(self, text):
self.info = self.info + text + '\n'
def ogrinfo(self, pszDataSource):
bVerbose = True
bSummaryOnly = True
self.info = ''
if not ogrAvailable:
self.info = 'OGR bindings not installed'
return
qDebug("Opening data source '%s'" % pszDataSource)
poDS = ogr.Open(pszDataSource, False)
if poDS is None:
self.info = self.failure(pszDataSource)
return
poDriver = poDS.GetDriver()
if bVerbose:
self.out("INFO: Open of `%s'\n using driver `%s' successful."
% (pszDataSource, poDriver.GetName()))
poDS_Name = poDS.GetName()
if str(type(pszDataSource)) == "<type 'unicode'>" \
and str(type(poDS_Name)) == "<type 'str'>":
poDS_Name = unicode(poDS_Name, 'utf8')
if bVerbose and pszDataSource != poDS_Name:
self.out("INFO: Internal data source name '%s'\n \
different from user name '%s'." \
% (poDS_Name, pszDataSource))
# --------------------------------------------------------------------
# Process each data source layer.
# --------------------------------------------------------------------
for iLayer in range(poDS.GetLayerCount()):
poLayer = poDS.GetLayer(iLayer)
if poLayer is None:
self.out("FAILURE: Couldn't fetch advertised layer %d!"
% iLayer)
return 1
self.ReportOnLayer(poLayer)
def ReportOnLayer(
self,
poLayer,
pszWHERE=None,
poSpatialFilter=None,
):
bVerbose = True
poDefn = poLayer.GetLayerDefn()
# --------------------------------------------------------------------
# Set filters if provided.
# --------------------------------------------------------------------
if pszWHERE is not None:
if poLayer.SetAttributeFilter(pszWHERE) != 0:
self.out('FAILURE: SetAttributeFilter(%s) failed.' % pszWHERE)
return
if poSpatialFilter is not None:
poLayer.SetSpatialFilter(poSpatialFilter)
# --------------------------------------------------------------------
# Report various overall information.
# --------------------------------------------------------------------
self.out('')
self.out('Layer name: %s' % poDefn.GetName())
if bVerbose:
self.out('Geometry: %s'
% ogr.GeometryTypeToName(poDefn.GetGeomType()))
self.out('Feature Count: %d' % poLayer.GetFeatureCount())
oExt = poLayer.GetExtent(True, can_return_null=True)
if oExt is not None:
self.out('Extent: (%f, %f) - (%f, %f)' % (oExt[0], oExt[1],
oExt[2], oExt[3]))
if poLayer.GetSpatialRef() is None:
pszWKT = '(unknown)'
else:
pszWKT = poLayer.GetSpatialRef().ExportToPrettyWkt()
self.out('Layer SRS WKT:\n%s' % pszWKT)
if len(poLayer.GetFIDColumn()) > 0:
self.out('FID Column = %s' % poLayer.GetFIDColumn())
if len(poLayer.GetGeometryColumn()) > 0:
self.out('Geometry Column = %s' % poLayer.GetGeometryColumn())
for iAttr in range(poDefn.GetFieldCount()):
poField = poDefn.GetFieldDefn(iAttr)
self.out('%s: %s (%d.%d)' % (poField.GetNameRef(),
poField.GetFieldTypeName(poField.GetType()),
poField.GetWidth(), poField.GetPrecision()))
| luca76/QGIS | python/plugins/processing/algs/gdal/ogrinfo.py | Python | gpl-2.0 | 6,032 |
import os.path
import logging
import subprocess
def get_hash(short=False):
if not os.path.exists('.git'):
logging.warning('*** Not working in a git repository ***')
return 'none'
git_command = ['git', 'rev-parse']
if short:
git_command += ['--short']
git_command += ['HEAD']
if uncommitted_changes():
logging.warning("*** Uncommitted changes present - Build container version might be outdated ***")
return subprocess.check_output(git_command).decode().strip()
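# Example (illustrative): outside a git checkout, get_hash() logs a warning
# and returns 'none'; inside one, get_hash(short=True) returns an abbreviated
# commit hash such as 'a1b2c3d'.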
def uncommitted_changes():
"""Return True is there are uncommitted changes."""
return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD', '--']) != 0
| Stratoscale/skipper | skipper/git.py | Python | apache-2.0 | 687 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import subnet_service_type_db_models
from neutron.extensions import subnet_service_types
from neutron.tests.unit.db import test_db_base_plugin_v2
class SubnetServiceTypesExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
extension = subnet_service_types.Subnet_service_types()
return extension.get_extended_resources(version)
class SubnetServiceTypesExtensionTestPlugin(
db_base_plugin_v2.NeutronDbPluginV2,
subnet_service_type_db_models.SubnetServiceTypeMixin):
"""Test plugin to mixin the subnet service_types extension.
"""
supported_extension_aliases = ["subnet-service-types"]
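# Behavioural sketch (as exercised by the tests below): a subnet whose
# service_types is, e.g., ['compute:foo'] only hands out fixed IPs to ports
# with a matching device_owner; ports without a matching typed subnet fall
# back to subnets that declare no service type at all.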
class SubnetServiceTypesExtensionTestCase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
"""Test API extension subnet_service_types attributes.
"""
CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
IP_VERSION = 4
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
'SubnetServiceTypesExtensionTestPlugin')
ext_mgr = SubnetServiceTypesExtensionManager()
super(SubnetServiceTypesExtensionTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _create_service_subnet(self, service_types=None, cidr=None,
network=None, enable_dhcp=False):
if not network:
with self.network() as network:
pass
network = network['network']
if not cidr:
cidr = self.CIDRS[0]
args = {'net_id': network['id'],
'tenant_id': network['tenant_id'],
'cidr': cidr,
'ip_version': self.IP_VERSION,
'enable_dhcp': enable_dhcp}
if service_types:
args['service_types'] = service_types
return self._create_subnet(self.fmt, **args)
def _test_create_subnet(self, service_types, expect_fail=False):
res = self._create_service_subnet(service_types)
if expect_fail:
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
else:
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_create_subnet_blank_type(self):
self._test_create_subnet([])
def test_create_subnet_bar_type(self):
self._test_create_subnet(['network:bar'])
def test_create_subnet_foo_type(self):
self._test_create_subnet(['compute:foo'])
def test_create_subnet_bar_and_foo_type(self):
self._test_create_subnet(['network:bar', 'compute:foo'])
def test_create_subnet_invalid_type(self):
self._test_create_subnet(['foo'], expect_fail=True)
self._test_create_subnet([1], expect_fail=True)
def test_create_subnet_no_type(self):
res = self._create_service_subnet()
subnet = self.deserialize('json', res)
subnet = subnet['subnet']
self.assertFalse(subnet['service_types'])
def _test_update_subnet(self, subnet, service_types, fail_code=None):
data = {'subnet': {'service_types': service_types}}
req = self.new_update_request('subnets', data, subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
if fail_code is not None:
self.assertEqual(fail_code,
res['NeutronError']['type'])
else:
subnet = res['subnet']
self.assertEqual(len(service_types),
len(subnet['service_types']))
for service in service_types:
self.assertIn(service, subnet['service_types'])
def test_update_subnet_zero_to_one(self):
service_types = ['network:foo']
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with a single service type
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_two(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with two service types
service_types.append('compute:bar')
self._test_update_subnet(subnet, service_types)
def test_update_subnet_two_to_one(self):
service_types = ['network:foo', 'compute:bar']
# Create a subnet with two service types
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with one service type
service_types = ['network:foo']
self._test_update_subnet(subnet, service_types)
def test_update_subnet_one_to_zero(self):
service_types = ['network:foo']
# Create a subnet with one service type
res = self._create_service_subnet(service_types)
subnet = self.deserialize('json', res)['subnet']
# Update it with zero service types
service_types = []
self._test_update_subnet(subnet, service_types)
def test_update_subnet_invalid_type(self):
# Create a subnet with no service type
res = self._create_service_subnet()
subnet = self.deserialize('json', res)['subnet']
# Update it with invalid service type(s)
self._test_update_subnet(subnet, ['foo'],
fail_code='InvalidSubnetServiceType')
self._test_update_subnet(subnet, [2],
fail_code='InvalidInputSubnetServiceType')
def _assert_port_res(self, port, service_type, subnet, fallback,
error='IpAddressGenerationFailureNoMatchingSubnet'):
res = self.deserialize('json', port)
if fallback:
port = res['port']
self.assertEqual(1, len(port['fixed_ips']))
self.assertEqual(service_type, port['device_owner'])
self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
else:
self.assertEqual(error, res['NeutronError']['type'])
def test_create_port_with_matching_service_type(self):
with self.network() as network:
pass
matching_type = 'network:foo'
non_matching_type = 'network:bar'
# Create a subnet with no service types
self._create_service_subnet(network=network)
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[2],
network=network)
# Create a subnet with a service type to match the port device owner
res = self._create_service_subnet([matching_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with device owner matching the correct service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, service_subnet, True)
def test_create_port_without_matching_service_type(self, fallback=True):
with self.network() as network:
pass
subnet = ''
matching_type = 'compute:foo'
non_matching_type = 'network:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a non-matching service type
self._create_service_subnet([non_matching_type],
cidr=self.CIDRS[1],
network=network)
# Create a port with device owner not matching the service subnet
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=matching_type)
self._assert_port_res(port, matching_type, subnet, fallback)
def test_create_port_without_matching_service_type_no_fallback(self):
self.test_create_port_without_matching_service_type(fallback=False)
def test_create_port_no_device_owner(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
# Create a port without a device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'])
self._assert_port_res(port, '', subnet, fallback)
def test_create_port_no_device_owner_no_fallback(self):
self.test_create_port_no_device_owner(fallback=False)
def test_create_port_exhausted_subnet(self, fallback=True):
with self.network() as network:
pass
subnet = ''
service_type = 'compute:foo'
if fallback:
# Create a subnet with no service types
res = self._create_service_subnet(network=network)
subnet = self.deserialize('json', res)['subnet']
# Create a subnet with a service_type
res = self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Update the service subnet with empty allocation pools
data = {'subnet': {'allocation_pools': []}}
req = self.new_update_request('subnets', data, service_subnet['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
# Create a port with a matching device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=service_type)
self._assert_port_res(port, service_type, subnet, fallback,
error='IpAddressGenerationFailure')
def test_create_port_exhausted_subnet_no_fallback(self):
self.test_create_port_exhausted_subnet(fallback=False)
def test_create_dhcp_port_compute_subnet(self, enable_dhcp=True):
with self.network() as network:
pass
res = self._create_service_subnet(['compute:nova'],
network=network,
enable_dhcp=enable_dhcp)
subnet = self.deserialize('json', res)['subnet']
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
fixed_ips=[{'subnet_id': subnet['id']}],
device_owner='network:dhcp')
self._assert_port_res(port, 'network:dhcp', subnet, enable_dhcp)
def test_create_dhcp_port_compute_subnet_no_dhcp(self):
self.test_create_dhcp_port_compute_subnet(enable_dhcp=False)
def test_update_port_fixed_ips(self):
with self.network() as network:
pass
service_type = 'compute:foo'
# Create a subnet with a service_type
res = self._create_service_subnet([service_type],
cidr=self.CIDRS[1],
network=network)
service_subnet = self.deserialize('json', res)['subnet']
# Create a port with a matching device owner
network = network['network']
port = self._create_port(self.fmt,
net_id=network['id'],
tenant_id=network['tenant_id'],
device_owner=service_type)
port = self.deserialize('json', port)['port']
# Update the port's fixed_ips. It's ok to reuse the same IP it already
# has.
ip_address = port['fixed_ips'][0]['ip_address']
data = {'port': {'fixed_ips': [{'subnet_id': service_subnet['id'],
'ip_address': ip_address}]}}
# self._update will fail with a MismatchError if the update cannot be
# applied
port = self._update('ports', port['id'], data)
class SubnetServiceTypesExtensionTestCasev6(
SubnetServiceTypesExtensionTestCase):
CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64']
IP_VERSION = 6
| eayunstack/neutron | neutron/tests/unit/extensions/test_subnet_service_types.py | Python | apache-2.0 | 14,519 |
#!/usr/bin/env python
import socket
import binascii
import time
import sys
HOST = '219.223.252.170'
PORT = 6000
BUFSIZE = 307
ADDR = (HOST, PORT)
# TCP socket
try:
tcpCliSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
tcpCliSoc.connect(ADDR)
except socket.error, msg:
    print 'Socket Error (connect): ' + str(msg[0]) + ', Error message: ' + msg[1]
sys.exit()
except Exception, errcode:
if errcode[0] == 10035:
print 'Error Code:10035'
if errcode[0] == 'timed out':
print 'Time out'
data = binascii.a2b_hex("534817035543130602145022002500250044339B194630303030303030303030303030303030303030303030303030303030303030303030303030303030303030309999993F303030303030303030303030303030303030303030303030303030303030303030303030303030303030303000401CC6303030303030303030303030303030303030303030303030303030303030303030303030303030303030303000401CC630303030303030303030303030303030303030303030303030303030303030303030303030303030303030300000305858585850393231392E3232332E3235322E313635373131392E3134352E382E31303700004F3436303030003531323334364C000000000080844300509A44CDCCCC3D3030303030300002353138333139343232323534303030304565F1")
sendCount = 0
while 1:
sendCount += 1
print 'Send Successfully:', sendCount
time.sleep(1)
try:
tcpCliSoc.send(data)
except socket.error, msg:
        print 'Socket Error (send): ' + str(msg[0]) + ', Error message: ' + msg[1]
sys.exit()
except Exception, errcode:
if errcode[0] == 10035:
print 'Error Code:10035'
if errcode[0] == 'timed out':
print 'Time out'
tcpCliSoc.close()
| lijingpeng/python | script/HTTCPClient.py | Python | gpl-2.0 | 1,648 |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import re
import sys
from spiderfetch import spider
# how many bytes of the file to download before doing a type check
HEADER_SIZE_HTML = 1024
HEADER_SIZE_URLS = 100 * 1024
# ref: file-4.23.tar.gz/magic/Magdir/sgml
html_regex = r"(?ims).*<\s*(!DOCTYPE html|html|head|title|body)"
_html_re = re.compile(html_regex)
class WrongFileTypeError(Exception):
pass
def is_html(data):
if data and re.match(_html_re, data):
return True
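# Examples (illustrative): is_html('<html><body>x</body></html>') returns
# True, while is_html('just text') returns None (falsy).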
def has_urls(data, url=None):
if data:
try:
next(spider.findall(data, url))
return True
except StopIteration:
pass
if __name__ == "__main__":
try:
data = open(sys.argv[1], 'r').read()
print("is_html: %s" % is_html(data))
print("has_urls: %s" % has_urls(data))
except IndexError:
print("Usage: %s <url>" % sys.argv[0])
| numerodix/spiderfetch | spiderfetch/filetype.py | Python | gpl-2.0 | 968 |
"""Implementation of a stack data structure."""
from linked_list import LinkedList
class Stack(object):
"""Set properties and methods of Stack class."""
def __init__(self, inbound_data=None):
"""Create new Stack composing from LinkedList."""
self._linked_list = LinkedList(inbound_data)
self.head = self._linked_list.head
def push(self, val):
"""Return a new node on the Stack using LinkedList.push."""
self._linked_list.push(val)
self.head = self._linked_list.head
def pop(self):
"""Return pop method for LinkedList on Stack."""
self.head = self._linked_list.head
return self._linked_list.pop()
def __len__(self):
"""Return the size of the Stack, overwriting len method."""
return len(self._linked_list)
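# Usage sketch (illustrative; assumes the LIFO push/pop semantics of the
# composed LinkedList):
#
#   s = Stack([1, 2, 3])
#   s.push(4)
#   assert s.pop() == 4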
| CaHudson94/data-structures | src/Completed/stack.py | Python | mit | 819 |
import subprocess
import os
import errno
import time
import shutil
class img_mount:
def __init__(self, img):
self.__nautilus_state(False)
self._image = img
self._loopback = self.__create_loopback(self._image)
print('created loopback {0}'.format(self._loopback))
self._loopmap = self.__map_loopback(self._loopback)
print('created loopmap {0}'.format(self._loopmap))
self._filesystems = self.__get_filesystem(self._loopmap)
print('created filesystems {0}'.format(self._filesystems))
self._disks = self.__get_disks(self._filesystems)
print('disks {0}'.format(self._disks))
self.__mount(self._loopmap, self._disks)
self.__nautilus_state(True)
def close(self):
print('detached loopback {0}'.format(self._loopback))
self.__umount(self._disks)
self.__detach_loopback(self._loopback)
def write_config(self, config):
print(config)
shutil.copyfile('{0}/config.txt'.format(self._disks[0]),
'{0}/config.back'.format(self._disks[0]))
config_txt = open('{0}/config.txt'.format(self._disks[0]),'w')
config_txt.write(config)
config_txt.close()
def write_file(self, f, path):
pass
def __get_loopback(self):
p = subprocess.Popen('losetup -f'.split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if err:
print('get_loopback')
print(err.decode('utf-8'))
return output.decode('utf-8').strip()
def __create_loopback(self, img):
loopback = self.__get_loopback()
code = 'losetup {0} {1}'.format(loopback, img).split()
p = subprocess.Popen(code,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if output:
print('creat_loopback')
print(output.decode('utf-8'))
if err:
print('creat_loopback')
print(err.decode('utf-8'))
return loopback
def __detach_loopback(self, loopback):
p = subprocess.Popen('kpartx -d {0}'.format(loopback).split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if output:
print('detach_loopback kpartx')
print(output.decode('utf-8'))
if err:
print('detach_loopback kpartx')
print(err.decode('utf-8'))
p = subprocess.Popen('losetup -D {0}'.format(loopback).split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if output:
print('detach_loopback losetup')
print(output.decode('utf-8'))
if err:
print('detach_loopback losetup')
print(err.decode('utf-8'))
return p.returncode
def __map_loopback(self, loopback):
p = subprocess.Popen('kpartx -v -a {0}'.format(loopback).split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
t = ()
for i in output.decode('utf-8').split('\n')[:-1]:
t = t + (i.split()[2],)
if output:
print('map_loopback')
print(output.decode('utf-8'))
if err:
print('map_loopback')
print(err.decode('utf-8'))
return t
def __get_filesystem(self, loopmap):
t = ()
for i in loopmap:
count = 0
while not os.path.islink('/dev/mapper/{0}'.format(i)):
time.sleep(0.01)
count+=1
if count > 100:
return ('error: timed out', 'error: timed out')
for i in loopmap:
code = 'file -sL /dev/mapper/{0}'.format(i).split()
p = subprocess.Popen(code,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
t = t + (output.decode('utf-8'),)
return t
    def __get_disks(self, filesystems):
t = ()
for i in filesystems:
label = ''
if 'UUID=' in i:
label = i.split('UUID=')[1].split()[0]
t = t + (label,)
print("found UUID= {0}".format(label))
if 'label:' in i:
label = i.split('label:')[1].strip('\"\n ')
t = t + (label,)
print("found label: {0}".format(label))
return t
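    # Illustration (hypothetical 'file -sL' output): a line ending in
    # label: "boot" yields 'boot', while a line containing UUID=1234-ABCD
    # followed by whitespace yields '1234-ABCD' via the parsing above.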
def __nautilus_state(self, state):
nautilus = 'gsettings set org.gnome.desktop.media-handling automount-open {0}'.format(str(state).lower())
print(nautilus)
p = subprocess.Popen(nautilus.split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if output:
print('nautlius state')
print(output.decode('utf-8'))
if err:
print('nautlius state')
print(err.decode('utf-8'))
time.sleep(1)
def __mount(self, loopmap, filesystems):
cwd = os.getcwd()+'/'
for lm,fs in zip(loopmap, filesystems):
mnt_point = cwd+fs
try:
os.makedirs(mnt_point)
except OSError as e:
if e.errno != errno.EEXIST:
raise
code = 'mount /dev/mapper/{0} {1}'.format(lm, mnt_point).split()
p = subprocess.Popen(code,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
if output:
print('mount')
print(output.decode('utf-8'))
if err:
print('mount')
print(err.decode('utf-8'))
def __umount(self, disks):
for d in disks:
directory = os.getcwd()+'/'+d
code = 'umount {0}'.format(directory).split()
p = subprocess.Popen(code,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(500):
try:
os.rmdir(directory)
break
except:
time.sleep(.01)
output, err = p.communicate()
if output:
print('umount')
print(output.decode('utf-8'))
if err:
print('umount')
print(err.decode('utf-8'))
if __name__ == '__main__':
i = '/home/michael/RasPiReader/src/images/2017-09-07-raspbian-stretch/2017-09-07-raspbian-stretch.img'
i = '/home/micheal/RasPiReader/src/images/2017-09-07-raspbian-stretch/2017-09-07-raspbian-stretch.img'
img = img_mount(i)
img.write_config('#writing config\n')
time.sleep(30)
img.close()
| CrazyBonze/RasPiReader | src/img_mount.py | Python | gpl-3.0 | 7,282 |
from time import time
from benchmark import Benchmark
from optimizer.optimizer import Optimizer
from optimizer.simulator import Simulator
from optimizer.evaluator import Evaluator
from extra.printer import pprint, BLUE
class EvaluatorPerf(Benchmark):
def __init__(self, plant, orderList, testNumber):
Benchmark.__init__(self, plant, orderList, testNumber)
self.prefix = "evaluator"
class EvaluatorMachinesPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "NumberOfMachines"
self.startValue = 1
def bench(self):
recipes = []
for o in self.orderList.orders:
recipes.append(o.recipe.recipe[:])
o.recipe.recipe = []
machines = self.plant.machines[:]
self.plant.machines = []
i = self.startValue
while i <= len(machines):
pprint("PERF Number of machines = " + str(i), BLUE)
self.plant.machines = machines[:i]
for j, o in enumerate(self.orderList.orders):
o.recipe.recipe = recipes[j][:i]
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 10
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime(i, t)
i += 1
class EvaluatorOrdersPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "NumberOfOrders"
self.startValue = 2
def bench(self):
orders = self.orderList.orders[:]
self.orderList.orders = []
i = self.startValue
while i <= len(orders):
pprint("PERF Number of orders = " + str(i), BLUE)
self.orderList.orders = orders[:i]
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 10
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime(i, t)
i += 1
class EvaluatorLargeValuesPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "LargeValuesMultiplier"
def bench(self):
val = 2
i = self.startValue
while i < 10:
pprint("PERF Large Value = " + str(i * val), BLUE)
for o in self.orderList.orders:
o.deadline *= val
for r in o.recipe.recipe:
r[1] *= val
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 500
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime((i + 1) * val, t)
i += 1
| fredmorcos/attic | projects/plantmaker/plantmaker-main/src/benchmark/evaluatorperf.py | Python | isc | 3,570 |
#!/usr/bin/env python
"""
Download NLTK data
"""
__author__ = "Manan Kalra"
__email__ = "[email protected]"
import nltk
nltk.download() | manankalra/Twitter-Sentiment-Analysis | demo/download.py | Python | mit | 145 |
# -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2013, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0.7'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| turiyag/gillian | phpmyadmin/doc/conf.py | Python | gpl-2.0 | 9,186 |
##########################################################
# == PunchSense ==
#
# Hardware + Software to detect punches to a boxing
# punch bag and to make fun stuff with the collected data.
#
# == Licensing terms ==
#
# (c)2015 Felipe Correa da Silva Sanches <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this program.
# If not, see <https://www.gnu.org/copyleft/lesser.html>.
#
##########################################################
play_samples_at_every_hit = False
tolerable_error = 125 #msec
main_theme_start_time = 0
#main_theme_start_time = (60*1 + 45) #hits start at approx. 1min 45s
#Adjustment for holding the Arduino with the accelerometer sensor directly in bare hands
hit_intensity_threshold = 2000
jackpot_intensity_threshold = 3500
render_3d = False
render_graph = False
arduino_bridge = False #read i2c data directly from RPi GPIO instead
log_data = False
if not arduino_bridge:
from Adafruit_LSM303 import Adafruit_LSM303
#song and feedback sound samples
MAIN_THEME = 'data/main_theme.mp3'
GOOD_FEEDBACK = 'data/good_feedback.ogg'
BAD_FEEDBACK = 'data/bad_feedback.ogg'
JACKPOT_FEEDBACK = 'data/Jackpot.ogg'
import pygame
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.mixer.init()
main_theme = pygame.mixer.music.load(MAIN_THEME)
good_sample = pygame.mixer.Sound(GOOD_FEEDBACK)
bad_sample = pygame.mixer.Sound(BAD_FEEDBACK)
jackpot_sample = pygame.mixer.Sound(JACKPOT_FEEDBACK)
hit_patterns = [
{
'name': "warm-up / freestyle",
'start': "0:00.000",
'loop_length': "1:40.000",
'hits': [],
'loops': 1 #this means it will play once (it won't loop!)
},
{
'name': "jab, jab; jab, right",
'start': "1:50.000",
'loop_length': "0:04.000",
'hits': [
{
'name': "jab",
'time': "0:02.001",
},
{
'name': "jab",
'time': "0:02.335",
},
{
'name': "jab",
'time': "0:03.168",
},
{
'name': "right",
'time': "0:03.666",
},
],
'loops': 48
},
{
'name': "[break]",
'start': "5:00.000",
'loop_length': "0:50.000",
'hits': [],
'loops': 1
},
{
'name': "jab, direto; cruzado, direto",
'start': "6:00.000",
'loop_length': "0:03.986",
'hits': [
{
'name': "jab",
'time': "0:02.668",
},
{
'name': "direto",
'time': "0:03.002",
},
{
'name': "cruzado",
'time': "0:04.496",
},
{
'name': "direto",
'time': "0:04.852",
},
],
'loops': 48
}
]
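# Absolute hit times are derived from the patterns above as
#   start + loop_index * loop_length + hit_time   (all converted to msec),
# e.g. the first "jab" of the second pattern on loop 0 falls at
#   1:50.000 + 0 * 0:04.000 + 0:02.001 -> 112001 msec.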
player_score = None
import math
def parse_time(s):
minutes, seconds = s.split(":")
msecs = 1000*(int(minutes)*60 + float(seconds))
# print "s: %s (msecs: %f)" % (s, msecs)
return msecs
def evaluate_hit(hit_time):
global player_score
if player_score == None:
player_score = 0
min_error = None
for pattern in hit_patterns:
for hit in pattern['hits']:
for i in range(pattern['loops']):
_start = parse_time(pattern['start'])
_loop_length = parse_time(pattern['loop_length'])
_time = parse_time(hit['time'])
absolute_candidate_time = _start + i*_loop_length + _time
error = math.fabs(absolute_candidate_time - hit_time)
#print "error: %f candidate: %f hit_time: %f" % (error, absolute_candidate_time, hit_time)
if error < tolerable_error:
player_score +=1
return "GOOD (%d msecs)" % (error)
    if min_error == None or error < min_error:
     min_error = error
return "BAD (%d msecs)" % (min_error)
if render_3d:
import OpenGL
from OpenGL.GLUT import *
from OpenGL.GL import *
from sys import argv
if arduino_bridge:
baudrate = 9600
port = "/dev/ttyACM0"
import serial
import sys
MAX_SAMPLES=1
samples = [0.0 for i in range(MAX_SAMPLES)]
cur_sample = 0
def add_sample(s):
global samples, cur_sample
samples[cur_sample] = s
cur_sample = (cur_sample+1) % MAX_SAMPLES
DETECT_DEBOUNCE = 10 #delay between hit detections
#(measured in number of samples)
inhibit_counter = 0
def detect_hit():
global samples, cur_sample, inhibit_counter
if inhibit_counter > 0:
inhibit_counter -= 1
return False
 if samples[cur_sample] > jackpot_intensity_threshold:
print "JACKPOT!"
jackpot_sample.play()
inhibit_counter = DETECT_DEBOUNCE
return False
 if samples[cur_sample] > hit_intensity_threshold:
#print "samples[%d]=%f" % (cur_sample, samples[cur_sample])
inhibit_counter = DETECT_DEBOUNCE
return True
return False
def parse_data(line):
#print "line: '%s'" % line
M = line.split("M:")[1].strip()
A = line.split("A:")[1].split("M:")[0].strip()
M = [float(v.strip()) for v in M.split("\t")]
A = [float(v.strip()) for v in A.split("\t")]
return [A, M]
def render_grid(grid_size=0.1, M=8, N=5, height=0):
for x in range(M):
for y in range(N):
glBegin(GL_POLYGON)
glNormal3f(0, 0, 1)
   glVertex3fv ([(x-float(M)/2) * grid_size, (y-float(N)/2) * grid_size, height])
   glVertex3fv ([(x+1-float(M)/2) * grid_size, (y-float(N)/2) * grid_size, height])
   glVertex3fv ([(x+1-float(M)/2) * grid_size, (y+1-float(N)/2) * grid_size, height])
   glVertex3fv ([(x-float(M)/2) * grid_size, (y+1-float(N)/2) * grid_size, height])
glEnd()
import random
path = []
def init_path():
global path
path = []
for i in range(20):
dx = 0.02*(random.randint(-100, 100)/100.0)
dy = 0.02*(random.randint(-100, 100)/100.0)
dz = 0.04
path.append([dx, dy, dz])
def opengl_init (hit=False):
"Set up several OpenGL state variables"
# Background color
if (hit):
glClearColor (0.9, 0.6, 6.0, 0.0)
else:
glClearColor (0.0, 0.0, 0.0, 0.0)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0);
glShadeModel(GL_SMOOTH)
glLightfv(GL_LIGHT0, GL_POSITION, [10, 10, 10, 1.0]);
# Projection matrix
glMatrixMode(GL_PROJECTION)
# glPolygonMode(GL_FRONT, GL_LINE);
glPolygonMode(GL_BACK, GL_LINE);
glLoadIdentity()
glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0)
angle = 0
def render_scene(A, M):
global angle, path
# angle += 0.4
angle = 360*math.atan2(M[1], M[0])/(2*3.1415)
if (int(angle) % 30==0):
init_path()
# Clear frame buffer
glClear (GL_COLOR_BUFFER_BIT);
glTranslatef(0.0, -0.5, 0.5)
glTranslatef(0.5, 0.5, 0.0)
glRotatef(-90 -30, 1.0, 0.0, 0.0)
glRotatef(angle+90, 0.0, 0.0, 1.0)
 render_grid(height=0.3)
if (render_graph):
glTranslatef(-0.5, -0.5, 0.0)
# Set draw color to white
glColor3f (1.0, 1.0, 1.0)
x,y,z = [0.5, 0.5, 0.3]
for i in range(len(path)):
dx, dy, dz = path[i]
render_segment(x, y, z, dx, dy, dz)
x += dx
y += dy
z += dz
# Flush and swap buffers
glutSwapBuffers()
from math import cos, sin, sqrt
def vector_cross(a, b):
cross = [0, 0, 0]
cross[0] = (a[1] * b[2]) - (a[2] * b[1])
cross[1] = (a[2] * b[0]) - (a[0] * b[2])
cross[2] = (a[0] * b[1]) - (a[1] * b[0])
return cross
def vector_module(a):
return math.sqrt(vector_dot(a, a))
def vector_dot(a, b):
return (a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2])
def vector_normalize(v):
mag = vector_module(v)
v[0] /= mag
v[1] /= mag
v[2] /= mag
return v
def render_segment(x,y,z, dx, dy, dz, r=0.04):
N=20
glColor3f(0.5, 0.0, 1.0)
for i in range(N):
glBegin(GL_POLYGON)
glNormal3f(sin((i+0.5)*2*3.1415/N), cos((i+0.5)*2*3.1415/N), sqrt(dx*dx+dy*dy))
glVertex3fv ([x+r*sin(i*2*3.1415/N), y+r*cos(i*2*3.1415/N), z])
glVertex3fv ([x+r*sin((i+1)*2*3.1415/N), y+r*cos((i+1)*2*3.1415/N), z])
glVertex3fv ([x+dx+r*sin((i+1)*2*3.1415/N), y+dy+r*cos((i+1)*2*3.1415/N), z+dz])
glVertex3fv ([x+dx+r*sin(i*2*3.1415/N), y+dy+r*cos(i*2*3.1415/N), z+dz])
glEnd()
def is_at_the_end_of_a_pattern(time):
for pattern in hit_patterns:
if len(pattern['hits']) > 0:
last_hit = pattern['hits'][-1]
for i in range(pattern['loops']):
_start = parse_time(pattern['start'])
_loop_length = parse_time(pattern['loop_length'])
_last_hit_time = parse_time(last_hit['time'])
_pattern_end_time = _start + i*_loop_length + _last_hit_time + 3*tolerable_error
error = math.fabs(_pattern_end_time - time)
if error < tolerable_error:
return True
return False
def main_routine():
global player_score
init_path()
if arduino_bridge:
ser = serial.Serial(port, baudrate, timeout=1)
else:
lsm = Adafruit_LSM303()
pygame.mixer.music.play(0, main_theme_start_time)
while True:
hit_time = pygame.mixer.music.get_pos() + main_theme_start_time*1000
if player_score != None and is_at_the_end_of_a_pattern(hit_time):
if play_samples_at_every_hit == False:
if player_score >= 3:
good_sample.play()
else:
bad_sample.play()
player_score = None
try:
if arduino_bridge:
line = ser.readline()
else:
A, M = lsm.read()
try:
if arduino_bridge:
A, M = parse_data(line)
if (log_data):
print "acel x=%d y=%d z=%d\n" % (A[0], A[1], A[2])
print "Mag x=%d y=%d z=%d\n\n" % (M[0], M[1], M[2])
intensity = vector_module(A)
add_sample(intensity)
hit = detect_hit()
if (hit):
#A hit has been detected.
#Do something here!
evaluation = evaluate_hit(hit_time)
print "Detected a %s hit at: %s" % (evaluation, hit_time)
if play_samples_at_every_hit:
if "GOOD" in evaluation:
good_sample.play()
elif "BAD" in evaluation:
bad_sample.play()
if render_3d:
opengl_init(hit)
render_scene(A, M)
   except (IndexError, ValueError):
#sometimes in the beginning of a read we get only half of a line, which breaks the parser.
#here we simply ignore that sample and move on.
pass
except KeyboardInterrupt:
if arduino_bridge:
ser.close()
sys.exit()
if render_3d:
glutInit(argv)
glutInitWindowSize(1200, 1200)
glutCreateWindow("PunchSense")
glutDisplayFunc(main_routine)
opengl_init()
glutMainLoop()
else:
while True:
main_routine()
| felipesanches/PunchSense | punchsense.py | Python | lgpl-3.0 | 12,126 |
# -*- coding: utf-8 -*-
import re
from nose.tools import eq_
from users.helpers import emaillink, user_data, user_link, users_list
from users.models import UserProfile
def test_emaillink():
email = '[email protected]'
obfuscated = unicode(emaillink(email))
# remove junk
m = re.match(r'<a href="#"><span class="emaillink">(.*?)'
'<span class="i">null</span>(.*)</span></a>'
'<span class="emaillink js-hidden">(.*?)'
'<span class="i">null</span>(.*)</span>', obfuscated)
    obfuscated = (''.join((m.group(1), m.group(2)))
                  .replace('&#64;', '@').replace('&#46;', '.'))[::-1]
eq_(email, obfuscated)
title = 'E-mail your question'
obfuscated = unicode(emaillink(email, title))
m = re.match(r'<a href="#">(.*)</a>'
'<span class="emaillink js-hidden">(.*?)'
'<span class="i">null</span>(.*)</span>', obfuscated)
eq_(title, m.group(1))
    obfuscated = (''.join((m.group(2), m.group(3)))
                  .replace('&#64;', '@').replace('&#46;', '.'))[::-1]
eq_(email, obfuscated)
def test_user_link():
u = UserProfile(username='jconnor', display_name='John Connor', pk=1)
eq_(user_link(u), '<a href="%s">John Connor</a>' % u.get_url_path())
# handle None gracefully
eq_(user_link(None), '')
def test_user_link_xss():
u = UserProfile(username='jconnor',
display_name='<script>alert(1)</script>', pk=1)
html = "<script>alert(1)</script>"
eq_(user_link(u), '<a href="%s">%s</a>' % (u.get_url_path(), html))
def test_users_list():
u1 = UserProfile(username='jconnor', display_name='John Connor', pk=1)
u2 = UserProfile(username='sconnor', display_name='Sarah Connor', pk=2)
eq_(users_list([u1, u2]), ', '.join((user_link(u1), user_link(u2))))
# handle None gracefully
eq_(user_link(None), '')
def test_short_users_list():
"""Test the option to shortened the users list to a certain size."""
# short list with 'others'
u1 = UserProfile(username='oscar', display_name='Oscar the Grouch', pk=1)
u2 = UserProfile(username='grover', display_name='Grover', pk=2)
u3 = UserProfile(username='cookies!', display_name='Cookie Monster', pk=3)
shortlist = users_list([u1, u2, u3], size=2)
eq_(shortlist, ', '.join((user_link(u1), user_link(u2))) + ', others')
def test_user_link_unicode():
"""make sure helper won't choke on unicode input"""
u = UserProfile(username=u'jmüller', display_name=u'Jürgen Müller', pk=1)
eq_(user_link(u), u'<a href="%s">Jürgen Müller</a>' % u.get_url_path())
u = UserProfile(username='\xe5\xaf\x92\xe6\x98\x9f', pk=1)
eq_(user_link(u),
u'<a href="%s">%s</a>' % (u.get_url_path(), u.username))
def test_user_data():
u = user_data(UserProfile(username='foo', pk=1))
eq_(u['anonymous'], False)
| jinankjain/zamboni | apps/users/tests/test_helpers.py | Python | bsd-3-clause | 2,926 |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 22 02:28:35 2015
@author: Enes Kemal Ergin
List Examples from A Byte of Python
"""
shoplist = ['apple', 'mango', 'carrot', 'banana']
print('I have', len(shoplist), 'items to purchase.')
print('These items are: ', end= ' ')
for item in shoplist:
print(item, end= ' ')
print('\n also have to buy rice.')
shoplist.append('rice')
print('My Shopping list is now', shoplist)
print('I want to sort my list')
shoplist.sort()
print('Sorted list: ', shoplist)
print('The first item I will buy is', shoplist[0])
olditem = shoplist[0]
del shoplist[0]
print('I bought the', olditem)
print('My shopping list is now', shoplist) | NAU-ACM/IntroductionToPython | Week 4/list_example_byte_of_python.py | Python | mit | 678 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from sqlalchemy import sql
from tacker.common import exceptions as n_exc
from tacker.db import sqlalchemyutils
from tacker.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class CommonDbMixin(object):
"""Common methods used in core and service plugins."""
# Plugins, mixin classes implementing extension will register
# hooks into the dict below for "augmenting" the "core way" of
# building a query for retrieving objects from a model class.
# To this aim, the register_model_query_hook and unregister_query_hook
# from this class should be invoked
_model_query_hooks = {}
# This dictionary will store methods for extending attributes of
# api resources. Mixins can use this dict for adding their own methods
# TODO(salvatore-orlando): Avoid using class-level variables
_dict_extend_functions = {}
@classmethod
def register_model_query_hook(cls, model, name, query_hook, filter_hook,
result_filters=None):
"""Register a hook to be invoked when a query is executed.
Add the hooks to the _model_query_hooks dict. Models are the keys
of this dict, whereas the value is another dict mapping hook names to
callables performing the hook.
Each hook has a "query" component, used to build the query expression
and a "filter" component, which is used to build the filter expression.
Query hooks take as input the query being built and return a
transformed query expression.
        Filter hooks take as input the filter expression being built and return
        a transformed filter expression.
"""
model_hooks = cls._model_query_hooks.get(model)
if not model_hooks:
# add key to dict
model_hooks = {}
cls._model_query_hooks[model] = model_hooks
model_hooks[name] = {'query': query_hook, 'filter': filter_hook,
'result_filters': result_filters}
@property
def safe_reference(self):
"""Return a weakref to the instance.
Minimize the potential for the instance persisting
unnecessarily in memory by returning a weakref proxy that
won't prevent deallocation.
"""
return weakref.proxy(self)
def _model_query(self, context, model):
query = context.session.query(model)
# define basic filter condition for model query
# NOTE(jkoelker) non-admin queries are scoped to their tenant_id
# NOTE(salvatore-orlando): unless the model allows for shared objects
query_filter = None
if not context.is_admin and hasattr(model, 'tenant_id'):
if hasattr(model, 'shared'):
query_filter = ((model.tenant_id == context.tenant_id) |
(model.shared == sql.true()))
else:
query_filter = (model.tenant_id == context.tenant_id)
# Execute query hooks registered from mixins and plugins
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
query_hook = hooks.get('query')
if isinstance(query_hook, basestring):
query_hook = getattr(self, query_hook, None)
if query_hook:
query = query_hook(context, model, query)
filter_hook = hooks.get('filter')
if isinstance(filter_hook, basestring):
filter_hook = getattr(self, filter_hook, None)
if filter_hook:
query_filter = filter_hook(context, model, query_filter)
# NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
# condition, raising an exception
if query_filter is not None:
query = query.filter(query_filter)
return query
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_by_id(self, context, model, id):
query = self._model_query(context, model)
return query.filter(model.id == id).one()
def _apply_filters_to_query(self, query, model, filters):
if filters:
for key, value in filters.iteritems():
column = getattr(model, key, None)
if column:
query = query.filter(column.in_(value))
for _name, hooks in self._model_query_hooks.get(model,
{}).iteritems():
result_filter = hooks.get('result_filters', None)
if isinstance(result_filter, basestring):
result_filter = getattr(self, result_filter, None)
if result_filter:
query = result_filter(query, filters)
return query
def _apply_dict_extend_functions(self, resource_type,
response, db_object):
for func in self._dict_extend_functions.get(
resource_type, []):
args = (response, db_object)
if isinstance(func, basestring):
func = getattr(self, func, None)
else:
# must call unbound method - use self as 1st argument
args = (self,) + args
if func:
func(*args)
def _get_collection_query(self, context, model, filters=None,
sorts=None, limit=None, marker_obj=None,
page_reverse=False):
collection = self._model_query(context, model)
collection = self._apply_filters_to_query(collection, model, filters)
if limit and page_reverse and sorts:
sorts = [(s[0], not s[1]) for s in sorts]
collection = sqlalchemyutils.paginate_query(collection, model, limit,
sorts,
marker_obj=marker_obj)
return collection
def _get_collection(self, context, model, dict_func, filters=None,
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters=filters,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
def _get_marker_obj(self, context, resource, limit, marker):
if limit and marker:
return getattr(self, '_get_%s' % resource)(context, marker)
return None
def _filter_non_model_columns(self, data, model):
"""Remove all the attributes from data which are not columns of
the model passed as second parameter.
"""
columns = [c.name for c in model.__table__.columns]
return dict((k, v) for (k, v) in
data.iteritems() if k in columns)
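# Hedged usage sketch (added for illustration, not part of the original
# module): a service plugin could scope queries by registering hooks for one
# of its models. `MyModel` and the hook methods below are hypothetical; note
# that hooks may be passed as method names (strings), which _model_query
# resolves with getattr at query time.
#
# class MyPluginDbMixin(CommonDbMixin):
#     def _my_query_hook(self, context, model, query):
#         return query  # e.g. add joins here
#
#     def _my_filter_hook(self, context, model, query_filter):
#         return query_filter  # e.g. AND in extra conditions
#
# MyPluginDbMixin.register_model_query_hook(
#     MyModel, 'my_hook', '_my_query_hook', '_my_filter_hook')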
| yamahata/tacker | tacker/db/db_base.py | Python | apache-2.0 | 8,596 |
"""
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD Style.
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..utils import array2d
###############################################################################
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Parameters
----------
emp_cov: array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov: array-like
shrunk covariance
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = array2d(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
assume_centered: Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X,
assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
###############################################################################
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrinkage coefficient
assume_centered: Boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
Returns
-------
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
    # optionally center data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(
X2.T[block_size * n_splits:], X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: Boolean
If True, data are not centered before computation.
        Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
if n_features > block_size:
raise MemoryError("LW: n_features is too large, " +
"try increasing block_size")
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
    and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True, block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
###############################################################################
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: boolean
If True, data are not centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance
shrinkage: float
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
class OAS(EmpiricalCovariance):
"""
Oracle Approximating Shrinkage Estimator
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
    and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y: not used, present for API consistence purpose.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
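# A minimal usage sketch (added for illustration; not part of the original
# module). On synthetic i.i.d. Gaussian data the true covariance is the
# identity, so both estimators should pick a fairly large shrinkage.
if __name__ == '__main__':
    from numpy.random import RandomState
    rng = RandomState(0)
    X = rng.randn(60, 5)  # 60 samples, 5 features
    lw = LedoitWolf().fit(X)
    oa = OAS().fit(X)
    print('Ledoit-Wolf shrinkage: %.3f' % lw.shrinkage_)
    print('OAS shrinkage: %.3f' % oa.shrinkage_)
    # the plain function API also works on a precomputed covariance
    print(shrunk_covariance(empirical_covariance(X), shrinkage=0.1).shape)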
| seckcoder/lang-learn | python/sklearn/sklearn/covariance/shrunk_covariance_.py | Python | unlicense | 17,960 |
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Simple event system."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import string
import re
from collections import defaultdict
from functools import partial
from inspect import getargspec
#------------------------------------------------------------------------------
# Event system
#------------------------------------------------------------------------------
class EventEmitter(object):
"""Class that emits events and accepts registered callbacks.
Derive from this class to emit events and let other classes know
of occurrences of actions and events.
Example
-------
```python
class MyClass(EventEmitter):
def f(self):
self.emit('my_event', 1, key=2)
o = MyClass()
# The following function will be called when `o.f()` is called.
@o.connect
def on_my_event(arg, key=None):
print(arg, key)
```
"""
def __init__(self):
self.reset()
def reset(self):
"""Remove all registered callbacks."""
self._callbacks = defaultdict(list)
def _get_on_name(self, func):
"""Return `eventname` when the function name is `on_<eventname>()`."""
r = re.match("^on_(.+)$", func.__name__)
if r:
event = r.group(1)
else:
raise ValueError("The function name should be "
"`on_<eventname>`().")
return event
def _create_emitter(self, event):
"""Create a method that emits an event of the same name."""
if not hasattr(self, event):
setattr(self, event,
lambda *args, **kwargs: self.emit(event, *args, **kwargs))
def connect(self, func=None, event=None, set_method=False):
"""Register a callback function to a given event.
To register a callback function to the `spam` event, where `obj` is
an instance of a class deriving from `EventEmitter`:
```python
@obj.connect
def on_spam(arg1, arg2):
pass
```
This is called when `obj.emit('spam', arg1, arg2)` is called.
Several callback functions can be registered for a given event.
The registration order is conserved and may matter in applications.
"""
if func is None:
return partial(self.connect, set_method=set_method)
# Get the event name from the function.
if event is None:
event = self._get_on_name(func)
# We register the callback function.
self._callbacks[event].append(func)
# A new method self.event() emitting the event is created.
if set_method:
self._create_emitter(event)
return func
def unconnect(self, *funcs):
"""Unconnect specified callback functions."""
for func in funcs:
for callbacks in self._callbacks.values():
if func in callbacks:
callbacks.remove(func)
def emit(self, event, *args, **kwargs):
"""Call all callback functions registered with an event.
Any positional and keyword arguments can be passed here, and they will
        be forwarded to the callback functions.
Return the list of callback return results.
"""
res = []
for callback in self._callbacks.get(event, []):
argspec = getargspec(callback)
if not argspec.keywords:
# Only keep the kwargs that are part of the callback's
# arg spec, unless the callback accepts `**kwargs`.
kwargs = {n: v for n, v in kwargs.items()
if n in argspec.args}
res.append(callback(*args, **kwargs))
return res
#------------------------------------------------------------------------------
# Progress reporter
#------------------------------------------------------------------------------
class PartialFormatter(string.Formatter):
"""Prevent KeyError when a format parameter is absent."""
def get_field(self, field_name, args, kwargs):
try:
return super(PartialFormatter, self).get_field(field_name,
args,
kwargs)
except (KeyError, AttributeError):
return None, field_name
def format_field(self, value, spec):
if value is None:
return '?'
try:
return super(PartialFormatter, self).format_field(value, spec)
except ValueError:
return '?'
def _default_on_progress(message, value, value_max, end='\r', **kwargs):
if value_max == 0:
return
if value <= value_max:
progress = 100 * value / float(value_max)
fmt = PartialFormatter()
kwargs['value'] = value
kwargs['value_max'] = value_max
print(fmt.format(message, progress=progress, **kwargs), end=end)
def _default_on_complete(message, end='\n', **kwargs):
# Override the initializing message and clear the terminal
# line.
fmt = PartialFormatter()
print(fmt.format(message + '\033[K', **kwargs), end=end)
class ProgressReporter(EventEmitter):
"""A class that reports progress done.
Example
-------
```python
pr = ProgressReporter()
pr.set_progress_message("Progress: {progress}%...")
pr.set_complete_message("Completed!")
pr.value_max = 10
for i in range(10):
pr.value += 1 # or pr.increment()
```
You can also add custom keyword arguments in `pr.increment()`: these
will be replaced in the message string.
Emits
-----
* `progress(value, value_max)`
* `complete()`
"""
def __init__(self):
super(ProgressReporter, self).__init__()
self._value = 0
self._value_max = 0
self._has_completed = False
def set_progress_message(self, message, line_break=False):
"""Set a progress message.
The string needs to contain `{progress}`.
"""
end = '\r' if not line_break else None
@self.connect
def on_progress(value, value_max, **kwargs):
kwargs['end'] = None if value == value_max else end
_default_on_progress(message, value, value_max, **kwargs)
def set_complete_message(self, message):
"""Set a complete message."""
@self.connect
def on_complete(**kwargs):
_default_on_complete(message, **kwargs)
def _set_value(self, value, **kwargs):
if value < self._value_max:
self._has_completed = False
self._value = value
self.emit('progress', self._value, self._value_max, **kwargs)
if not self._has_completed and self._value >= self._value_max:
self.emit('complete', **kwargs)
self._has_completed = True
def increment(self, **kwargs):
"""Equivalent to `self.value += 1`.
Custom keywoard arguments can also be passed to be processed in the
progress message format string.
"""
self._set_value(self._value + 1, **kwargs)
def reset(self, value_max=None):
"""Reset the value to 0 and the value max to a given value."""
super(ProgressReporter, self).reset()
self._value = 0
if value_max is not None:
self._value_max = value_max
@property
def value(self):
"""Current value (integer)."""
return self._value
@value.setter
def value(self, value):
self._set_value(value)
@property
def value_max(self):
"""Maximum value (integer)."""
return self._value_max
@value_max.setter
def value_max(self, value_max):
if value_max > self._value_max:
self._has_completed = False
self._value_max = value_max
def is_complete(self):
"""Return whether the task has completed."""
return self._value >= self._value_max
def set_complete(self, **kwargs):
"""Set the task as complete."""
self._set_value(self.value_max, **kwargs)
@property
def progress(self):
"""Return the current progress as a float value in `[0, 1]`."""
return self._value / float(self._value_max)
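# A small self-contained demo (added for illustration, not part of the
# original module): wire a ProgressReporter to the default message helpers.
if __name__ == '__main__':
    pr = ProgressReporter()
    pr.set_progress_message("Progress: {progress:.0f}%...")
    pr.set_complete_message("Completed!")
    pr.value_max = 5
    for _ in range(5):
        pr.increment()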
| nippoo/phy | phy/utils/event.py | Python | bsd-3-clause | 8,471 |
"""
WSGI config for niuforum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "niuforum.settings")
application = get_wsgi_application()
| niutool/niuforum | niuforum/wsgi.py | Python | mit | 393 |
#!/usr/bin/env python
# A library to scrape statistics from Arris CM820 and similar cable modems
# Inspired by https://gist.github.com/berg/2651577
import BeautifulSoup
import requests
import time
cm_time_format = '%a %Y-%m-%d %H:%M:%S'
def get_status(baseurl):
# Retrieve and process the page from the modem
url = baseurl + 'status_cgi'
pagedata = requests.get(url).content
timestamp = time.time() # Get the time immediately after retrieval
bs = BeautifulSoup.BeautifulSoup(pagedata)
downstream_table = bs.findAll('table')[1].findAll('tr')[1:]
upstream_table = bs.findAll('table')[3].findAll('tr')[2:]
status_table = bs.findAll('table')[5].findAll('tr')
interface_table = bs.findAll('table')[7].findAll('tr')[1:]
downstream_stats = []
for row in downstream_table:
cols = row.findAll('td')
modem_channel = int(cols[0].string.strip()[-1])
docsis_channel = int(cols[1].string.strip())
frequency = float(cols[2].string.strip().split()[0])
if cols[3].string.strip() == '----':
channel_available = False
power = None
snr = None
modulation = None
octets = None
corrected_errors = None
uncorrectable_errors = None
else:
power = float(cols[3].string.strip().split()[0])
snr = float(cols[4].string.strip().split()[0])
modulation = cols[5].string.strip()
octets = int(cols[6].string.strip())
corrected_errors = int(cols[7].string.strip())
uncorrectable_errors = int(cols[8].string.strip())
channelstats = {'modem_channel': modem_channel,
'dcid': docsis_channel,
'frequency': frequency,
'power': power,
'snr': snr,
'modulation': modulation,
'octets': octets,
'corrected_errors': corrected_errors,
'uncorrectable_errors': uncorrectable_errors}
downstream_stats.append(channelstats)
upstream_stats = []
for row in upstream_table:
cols = row.findAll('td')
modem_channel = int(cols[0].string.strip()[-1])
docsis_channel = int(cols[1].string.strip())
frequency = float(cols[2].string.strip().split()[0])
power = float(cols[3].string.strip().split()[0])
channel_type = cols[4].string.strip()
symbol_rate = int(cols[5].string.strip().split()[0]) * 1000
modulation = cols[6].string.strip()
channelstats = {'modem_channel': modem_channel,
'ucid': docsis_channel,
'frequency': frequency,
'power': power,
'channel_type': channel_type,
'symbol_rate': symbol_rate,
'modulation': modulation}
upstream_stats.append(channelstats)
uptime_split = status_table[0].findAll('td')[1].string.strip().split(':')
uptime_days = int(uptime_split[0].strip().split()[0])
uptime_hours = int(uptime_split[1].strip().split()[0])
uptime_minutes = int(uptime_split[2].strip().split()[0])
uptime = ((((uptime_days * 24) + uptime_hours) * 60) + uptime_minutes) * 60
cpe_split = status_table[1].findAll('td')[1].string.strip().split(',')
cpelist = {}
for entry in cpe_split:
entrystripped = entry.strip()
entrysplit = entrystripped.split('CPE')
cpe_type = entrysplit[0]
cpe_count = int(entrysplit[1].strip('()'))
cpelist[cpe_type] = cpe_count
cm_status = status_table[2].findAll('td')[1].string.strip()
cm_time_string = status_table[3].findAll('td')[1].string.strip()
cm_time = time.mktime(time.strptime(cm_time_string, cm_time_format))
modem_status = {'uptime': uptime,
'cpe': cpelist,
'cm_status': cm_status,
'cm_time': cm_time}
interfaces = []
for row in interface_table:
cols = row.findAll('td')
interface_name = cols[0].string.strip()
provisioning_state = cols[1].string.strip()
interface_state = cols[2].string.strip()
interface_speed = cols[3].string.strip()
mac = cols[4].string.strip()
interface_data = {'name': interface_name,
'provisioned': provisioning_state,
'state': interface_state,
'speed': interface_speed,
'mac': mac}
interfaces.append(interface_data)
status = {'timestamp': timestamp,
'status': modem_status,
'downstream': downstream_stats,
'upstream': upstream_stats,
'interfaces': interfaces}
return status
def get_versions(baseurl):
raise NotImplementedError()
def get_eventlog(baseurl):
raise NotImplementedError()
def get_cmstate(baseurl):
raise NotImplementedError()
def get_productdetails(baseurl):
raise NotImplementedError()
def get_dhcpparams(baseurl):
raise NotImplementedError()
def get_qos(url):
raise NotImplementedError()
def get_config(url):
raise NotImplementedError()
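# Hedged example (not part of the original library). The address below is an
# assumption -- Arris modems commonly answer on 192.168.100.1, but yours may
# differ -- and note the trailing slash, which get_status() expects.
if __name__ == '__main__':
    status = get_status('http://192.168.100.1/')
    for ch in status['downstream']:
        print('DS channel %s: power %s dBmV, SNR %s dB'
              % (ch['modem_channel'], ch['power'], ch['snr']))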
| wolrah/arris_stats | arris_scraper.py | Python | mit | 5,303 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
test_views.py
http://docs.djangoproject.com/en/dev/topics/testing/
.. moduleauthor:: Russell Sim <[email protected]>
.. moduleauthor:: Steve Androulakis <[email protected]>
"""
from django.test import TestCase
class UploadTestCase(TestCase):
def setUp(self):
from django.contrib.auth.models import User
from os import path, mkdir
from tempfile import mkdtemp
from shutil import rmtree
from django.conf import settings
from tardis.tardis_portal import models
user = 'tardis_user1'
pwd = 'secret'
email = ''
self.user = User.objects.create_user(user, email, pwd)
self.test_dir = mkdtemp()
self.exp = models.Experiment(title='test exp1',
institution_name='monash',
created_by=self.user,
)
self.exp.save()
self.dataset = models.Dataset(description="dataset description...",
experiment=self.exp)
self.dataset.save()
self.experiment_path = path.join(settings.FILE_STORE_PATH,
str(self.dataset.experiment.id))
self.dataset_path = path.join(self.experiment_path,
str(self.dataset.id))
mkdir(self.experiment_path)
mkdir(self.dataset_path)
#write test file
self.filename = "testfile.txt"
self.f1 = open(path.join(self.test_dir, self.filename), 'w')
self.f1.write("Test file 1")
self.f1.close()
self.f1_size = path.getsize(path.join(self.test_dir, self.filename))
self.f1 = open(path.join(self.test_dir, self.filename), 'r')
def testFileUpload(self):
from django.http import QueryDict, HttpRequest
from tardis.tardis_portal.views import upload
from django.core.files import File
from django.core.files.uploadedfile import UploadedFile
from django.utils.datastructures import MultiValueDict
from tardis.tardis_portal import models
from os import path
#create request.FILES object
django_file = File(self.f1)
uploaded_file = UploadedFile(file=django_file)
uploaded_file.name = self.filename
uploaded_file.size = self.f1_size
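        # Build the multipart POST by hand; 'Filedata' is the form field name
        # the upload view reads (the convention of Flash-style uploaders such
        # as swfupload/uploadify).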
post_data = [('enctype', "multipart/form-data")]
post = QueryDict('&'.join(['%s=%s' % (k, v) for k, v in post_data]))
files = MultiValueDict({'Filedata': [uploaded_file]})
request = HttpRequest()
request.FILES = files
request.POST = post
request.method = "POST"
response = upload(request, self.dataset.id)
test_files_db = models.Dataset_File.objects.filter(
dataset__id=self.dataset.id)
self.assertTrue(path.exists(path.join(self.dataset_path, self.filename)))
        self.assertEqual(self.dataset.id, 1)
        self.assertEqual(test_files_db[0].url, "file://1/testfile.txt")
def tearDown(self):
from shutil import rmtree
self.f1.close()
rmtree(self.test_dir)
rmtree(self.dataset_path)
rmtree(self.experiment_path)
self.exp.delete()
def testUploadComplete(self):
from django.http import QueryDict, HttpRequest
from tardis.tardis_portal.views import upload_complete
data = [('filesUploaded', '1'),
('speed', 'really fast!'),
('allBytesLoaded', '2'),
('errorCount', '0')]
post = QueryDict('&'.join(['%s=%s' % (k, v) for k, v in data]))
request = HttpRequest()
request.POST = post
response = upload_complete(request)
self.assertTrue("<p>Number: 1</p>" in response.content)
self.assertTrue("<p>Errors: 0</p>" in response.content)
self.assertTrue("<p>Bytes: 2</p>" in response.content)
self.assertTrue("<p>Speed: really fast!</p>" in response.content)
| grischa/mytardis-mrtardis | tardis/tardis_portal/tests/test_views.py | Python | bsd-3-clause | 5,829 |
#!/usr/bin/python3
from scrapers.scrape import scrape_page
# if you want to use this scraper without the RESTful api webservice then
# change this import: from scrape import scrape_page
import re
try:
import pandas as pd
pandasImported = True
except ImportError:
pandasImported = False
BASE_URL = "http://finviz.com/quote.ashx?t="
VALUE_NAMES_XPATH = '//*[@class="snapshot-td2-cp"]/text()'
VALUES_XPATH = '//*[@class="snapshot-td2"]/b/text() | //*[@class="snapshot-td2"]/b/*/text()'
def get_statistics_table(page):
"""
This function will return the financial statistics table on a stock's finviz page, if it exists as a
Python dictionary
:param page: HTML tree structure based on the html markup of the scraped web page.
:return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise will
return a empty dictionary
"""
value_names = page.xpath(VALUE_NAMES_XPATH)
values = page.xpath(VALUES_XPATH)
values = [value if value != "-" else None for value in values]
table = dict(zip(value_names, values))
return table
def get_statistic(ticker_symbol, stat_name, page=None):
"""
This function will get the associated financial statistic from the corresponding finviz page given the
statistic's name and the ticker symbol
:param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
:param stat_name: The name of the interested financial statistic (e.g., "P/E", "Price", "Volume").
An exhaustive list of available financial statistics can be found on a stock's finviz page
:param page: HTML tree structure based on the html markup of the scraped web page. If one is not passed in the
function will scrape the page
:return: the value of the interested financial statistic if it exists, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
    if stat_name in table and table[stat_name]:
return table[stat_name]
else:
return None
def get_all_statistics(ticker_symbol, page=None):
"""
    This function will get all the associated financial statistics from the corresponding finviz page
    given the ticker symbol
    :param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT")
:param page: HTML tree structure based on the html markup of the scraped page. If one is not passed in the
function will scrape the page
:return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise None
"""
if page is None:
page = scrape_page(BASE_URL + ticker_symbol)
table = get_statistics_table(page)
if table:
return table
else:
return None
def get_all_statistics_series(ticker_symbol):
"""
    Return the ticker's statistics as a pandas Series, converting values to numeric types where possible.
"""
if not pandasImported:
raise Exception("Pandas not installed.")
d = get_all_statistics(ticker_symbol)
new_dict = {}
for k,v in d.items():
        if v is None:
continue
        if v.endswith('%'):
# percent
new_dict[k + '(%)'] = float(v[:-1])
elif (k == '52W Range'):
            m = re.match(r'([0-9\.\-]+) - ([0-9\.\-]+)', v)
new_dict['52W Low'] = float(m.group(1))
new_dict['52W High'] = float(m.group(2))
else:
try:
# remove any commas
v = re.sub(',','',v)
                v = re.sub('B','E9',v) # exponentiate billions
v = re.sub('M','E6',v)
v = re.sub('K','E3',v)
new_dict[k] = float(v)
except ValueError:
new_dict[k] = v
return pd.Series(new_dict)
def get_all_statistics_df(symbol_list):
"""Return a dataframe for a list of symbols.
"""
series = []
for s in symbol_list:
series.append(get_all_statistics_series(s))
return pd.DataFrame(series,index=symbol_list)
if __name__ == "__main__":
# Test Cases
print(get_statistic("AAPL", "P/E"))
print(get_statistic("AAPL", "Inst Own"))
print(get_statistic("AAPL", "Change"))
print(get_statistic("AAPL", "This should return None"))
print(get_all_statistics("AAPL"))
| ajpotato214/Finance-Data-Scraper-API | finance_data_scraper/scrapers/finviz.py | Python | mit | 4,390 |
# -*- coding: utf-8 -*-
from ddbmock.database import dynamodb
def delete_table(post):
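    # Drop the table and echo back a terse TableDescription, mirroring the
    # shape of the real DynamoDB DeleteTable response.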
name = post[u'TableName']
table = dynamodb.delete_table(name)
return {
'TableDescription': table.to_dict(verbose=False)
}
| sendgridlabs/ddbmock | ddbmock/operations/delete_table.py | Python | lgpl-3.0 | 235 |
from flask import Flask
from flask.ext.quik import FlaskQuik
from flask.ext.quik import render_template
app = Flask(__name__)
quik = FlaskQuik(app)
@app.route('/', methods=['GET', 'POST'] )
def hello_quik():
return render_template('hello.html', name='quik')
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True, port=5000)
| avelino/Flask-Quik | tests/example.py | Python | mit | 315 |
# -*- coding: utf-8 -*-
import os
import sys
from sklearn import svm
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn.grid_search import GridSearchCV
import numpy as np
import csv
def output_result(clf):
test_feature_file = np.genfromtxt(open("../data/test.csv", "rb"), delimiter=",", dtype=float)
print "Id,Solution"
i = 1
for test_feature in test_feature_file:
print str(i) + "," + str(int(clf.predict(test_feature)[0]))
i += 1
def get_score(clf, train_features, train_labels):
X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_features, train_labels, test_size=0.4, random_state=0)
clf.fit(X_train, y_train)
print clf.score(X_test, y_test)
def get_accuracy(clf, train_features, train_labels):
scores = cross_validation.cross_val_score(clf, train_features, train_labels, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
def grid_search(train_features, train_labels):
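    # Exhaustive cross-validated search over linear- and RBF-kernel SVC
    # hyperparameters, using every available core (n_jobs=-1).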
param_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
clf = GridSearchCV(svm.SVC(C=1), param_grid, n_jobs=-1)
clf.fit(train_features, train_labels)
print clf.best_estimator_
if __name__ == "__main__":
# train_feature_file = csv.reader(open("train.csv", "rb"))
# train_label_file = csv.reader(open("trainLabels.csv", "rb"))
train_feature_file = np.genfromtxt(open("../data/train.csv", "rb"), delimiter=",", dtype=float)
train_label_file = np.genfromtxt(open("../data/trainLabels.csv", "rb"), delimiter=",", dtype=float)
train_features = []
train_labels = []
for train_feature, train_label in zip(train_feature_file, train_label_file):
train_features.append(train_feature)
train_labels.append(train_label)
train_features = np.array(train_features)
train_labels = np.array(train_labels)
grid_search(train_features, train_labels)
# clf.fit(train_features, train_labels)
# output_result(clf)
| Lewuathe/kaggle-repo | data-science-london/src/grid_search.py | Python | mit | 2,134 |
#!/usr/bin/env python
"""
Configuration script for the analyzer of B0s -> K*0 Ds+ Ds- background events
                                                 |   |   |-> tau- nu
                                                 |   |        |-> pi- pi- pi+ nu
                                                 |   |-> tau+ nu
                                                 |        |-> pi+ pi+ pi- nu
                                                 |-> K+ pi-
Note: it is supposed to be used within heppy_fcc framework
"""
import os
import heppy.framework.config as cfg
import logging
from ROOT import gSystem
from EventStore import EventStore as Events
from heppy_fcc.analyzers.BackgroundBs2DsDsKWithDs2TauNuAnalyzer import BackgroundBs2DsDsKWithDs2TauNuAnalyzer
logging.basicConfig(level=logging.WARNING)
# input component
# several input components can be declared and added to the list of selected components
input_component = cfg.Component('ILD-like', files = ['/afs/cern.ch/work/a/ansemkiv/private/FCC/analysis/background_Bs2DsDsK_with_Ds2TauNu_100k.root'])
selected_components = [input_component]
# analyzers
# analyzer for Bs -> Ds Ds K* events
bgana = cfg.Analyzer(BackgroundBs2DsDsKWithDs2TauNuAnalyzer,
smear_momentum = True,
momentum_x_resolution = 0.01,
momentum_y_resolution = 0.01,
momentum_z_resolution = 0.01,
smear_pv = True,
                     # ILD-like res
pv_x_resolution = 0.0025,
pv_y_resolution = 0.0025,
pv_z_resolution = 0.0025,
# progressive res
# pv_x_resolution = 0.001,
# pv_y_resolution = 0.001,
# pv_z_resolution = 0.001,
# outstanding res
# pv_x_resolution = 0.0005,
# pv_y_resolution = 0.0005,
# pv_z_resolution = 0.0005,
smear_sv = True,
                     # ILD-like res
sv_x_resolution = 0.007,
sv_y_resolution = 0.007,
sv_z_resolution = 0.007,
# progressive res
# sv_x_resolution = 0.003,
# sv_y_resolution = 0.003,
# sv_z_resolution = 0.003,
# outstanding res
# sv_x_resolution = 0.0015,
# sv_y_resolution = 0.0015,
# sv_z_resolution = 0.0015,
smear_tv = True,
                     # ILD-like res
tv_x_resolution = 0.005,
tv_y_resolution = 0.005,
tv_z_resolution = 0.005,
# progressive res
# tv_x_resolution = 0.002,
# tv_y_resolution = 0.002,
# tv_z_resolution = 0.002,
# outstanding res
# tv_x_resolution = 0.001,
# tv_y_resolution = 0.001,
# tv_z_resolution = 0.001,
stylepath = os.environ.get('FCC') + 'lhcbstyle.C',
tree_name = 'Events',
tree_title = 'Events',
mc_truth_tree_name = 'MCTruth',
mc_truth_tree_title = 'MC Truth',
verbose = False)
# definition of a sequence of analyzers, the analyzers will process each event in this order
sequence = cfg.Sequence([bgana])
# finalization of the configuration object.
gSystem.Load('libdatamodel')
config = cfg.Config(components = selected_components, sequence = sequence, services = [],events_class = Events)
| semkiv/heppy_fcc | background_Bs2DsDsK_with_Ds2TauNu_analysis_cfg.py | Python | gpl-3.0 | 3,807 |
#!/home/oscar/django/biblioteca/biblio-env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
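    """Canvas that displays an image as a grid of tiles so that painting
    only refreshes the tiles a stroke actually touches."""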
def __init__(self, master, image):
tkinter.Canvas.__init__(self, master,
width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=tkinter.NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| oscarvogel/biblioteca | biblio-env/bin/painter.py | Python | gpl-3.0 | 2,189 |
""" App para almacenar los estudios socioeconómicos.
Este app almacena la información no vital de los estudios socioeconómicos, es decir, los campos
que no se utilizan de forma frecuente en la aplicación.
"""
| erikiado/jp2_online | estudios_socioeconomicos/__init__.py | Python | mit | 214 |
import hashlib
import mock
import uuid
from django.test import TestCase
from ..models import Commander
class CommanderTestCase(TestCase):
def test_generate_token(self):
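        # Pin uuid4 so the md5-derived token is deterministic.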
with mock.patch.object(uuid, 'uuid4', return_value='a_test'):
cmdr = Commander(
name='Branch'
)
self.assertEqual(
cmdr.generate_token(),
hashlib.md5('a_test').hexdigest()
)
def test_save(self):
# We need to ensure tokens get auto-populated here.
cmdr = Commander.objects.create(
name='Branch'
)
self.assertTrue(len(cmdr.api_token) > 0)
| toastdriven/eliteracing | cmdrs/tests/test_models.py | Python | bsd-3-clause | 666 |
# -*- coding: utf-8 -*-
import threading
import time
import sys
from hit.process.processor import ATTMatrixHitProcessor
import traceback
class ThreadedSerialReader(threading.Thread):
def __init__(self, threadID, name, queue, max_readings, serial_port_builder, port, baud, CustomSerial=None,
isFast=True):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.queue = queue
self.connected = False
self.max_readings = max_readings
self.serial_port_builder = serial_port_builder
self.port = port
self.baudrate = baud
self.custom_serial = CustomSerial
self.is_stopped = False
self.build_serial()
self.processor = ATTMatrixHitProcessor()
self.isFast = isFast
self.publisher = None
# self.publisher = PikaPublisher("my_queue")
def build_serial(self):
        if self.custom_serial is not None:
self.serial_port = self.custom_serial
else:
self.serial_port = self.serial_port_builder.build_serial_port(self.port, self.baudrate)
def run(self):
self.write_log("Starting " + self.name)
time.sleep(5)
iterations = 0
while not self.connected and not self.is_stopped:
# time.sleep(0.1)
            if self.serial_port is not None and self.serial_port.isOpen():
                while self.max_readings is None or iterations < self.max_readings:
# time.sleep(0.01)
                    if self.read_and_enqueue():
iterations = iterations + 1
else:
break
else:
time.sleep(0.1)
try:
                    # build_serial() assigns self.serial_port itself
                    self.build_serial()
except Exception:
self.write_log("Error: Check the serial connection or cable, please.")
self.write_log("Exiting " + self.name)
def read_and_enqueue(self):
try:
if self.isFast:
reading = self.serial_port.readline()
else:
reading = self.serial_port.readline(1)
if reading != "":
if self.publisher != None:
the_reading = reading + "/" + str(time.time())
self.publisher.publish(the_reading)
hit = self.processor.parse_hit(reading)
self.queue.put(hit)
self.connected = True
self.write_log("Reading from serial: " + reading)
else:
time.sleep(0.1)
pass
        except Exception:
self.write_log("Miss!")
self.serial_port.close()
self.connected = False
traceback.print_exc(file=sys.stdout)
return False
return True
def write_log(self, str_message):
print(str_message)
sys.stdout.flush()
def stop(self):
self.is_stopped = True
def restart(self):
self.is_stopped = False
def pause(self):
self.is_stopped = True
def unpause(self):
self.is_stopped = False
import pika
class PikaPublisher(object):
def __init__(self, queue_name):
self.queue_name = queue_name
self.connection = pika.BlockingConnection(pika.ConnectionParameters('127.0.0.1'))
def publish(self, message):
channel = self.connection.channel()
channel.queue_declare(queue=self.queue_name, durable=True)
channel.basic_publish(exchange='',
routing_key=self.queue_name,
body=message,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
))
channel.close()
def close(self):
self.connection.close()
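# Minimal usage sketch (hypothetical names, kept as comments because it needs
# real hardware): pair the reader with a queue and any builder object that
# exposes build_serial_port(port, baud) returning a pySerial-like port.
#
#   from Queue import Queue
#   queue = Queue()
#   reader = ThreadedSerialReader(1, "att-reader", queue, None,
#                                 my_builder, "/dev/ttyUSB0", 115200)
#   reader.start()
#   hit = queue.get()   # blocks until a parsed hit arrives
#   reader.stop()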
| Centre-Alt-Rendiment-Esportiu/att | src/python/hit/serial/serial_reader.py | Python | gpl-3.0 | 3,952 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import operator
import six
from collections import OrderedDict
from functools import reduce
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.db import models
from rest_framework import generics, exceptions
from rest_framework.compat import distinct
from rest_framework.decorators import api_view
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from .serializers import *
def format_url(_url):
return 'business-logic:rest:{}'.format(_url)
@api_view(('GET',))
def api_root(request, format=None):
from rest_framework.reverse import reverse
return Response(
OrderedDict((
('program-interface', reverse(format_url('program-interface-list'), request=request, format=format)),
('program', reverse(format_url('program-list'), request=request, format=format)),
('program-version', reverse(format_url('program-version-list'), request=request, format=format)),
('program-version-create', reverse(format_url('program-version-create'), request=request, format=format)),
('reference', reverse(format_url('reference-descriptor-list'), request=request, format=format)),
('execution', reverse(format_url('execution-list'), request=request, format=format)),
)))
class StandardResultsSetPagination(PageNumberPagination):
page_size = 50
page_size_query_param = 'page_size'
max_page_size = 1000
class ObjectList(generics.ListAPIView):
pagination_class = StandardResultsSetPagination
filter_backends = (DjangoFilterBackend,)
class ProgramInterfaceList(generics.ListAPIView):
queryset = ProgramInterface.objects.all()
serializer_class = ProgramInterfaceListSerializer
pagination_class = StandardResultsSetPagination
class ProgramInterfaceView(generics.RetrieveAPIView):
queryset = ProgramInterface.objects.all()
serializer_class = ProgramInterfaceSerializer
class ProgramList(ObjectList):
queryset = Program.objects.all()
serializer_class = ProgramListSerializer
filterset_fields = ('program_interface',)
class ProgramView(generics.RetrieveAPIView):
queryset = Program.objects.all()
serializer_class = ProgramSerializer
class ProgramVersionList(ObjectList):
queryset = ProgramVersion.objects.all()
serializer_class = ProgramVersionListSerializer
filterset_fields = ('program',)
class ProgramVersionCreate(generics.CreateAPIView):
queryset = ProgramVersion.objects.all()
serializer_class = ProgramVersionCreateSerializer
class ProgramVersionView(generics.RetrieveUpdateDestroyAPIView):
queryset = ProgramVersion.objects.all()
serializer_class = ProgramVersionSerializer
def perform_update(self, serializer):
instance = self.get_object()
instance.entry_point.delete()
super(ProgramVersionView, self).perform_update(serializer)
class ExecutionList(ObjectList):
queryset = Execution.objects.all()
serializer_class = ExecutionListSerializer
filterset_fields = ('program_version',)
class ExecutionView(generics.RetrieveDestroyAPIView):
queryset = Execution.objects.all()
serializer_class = ExecutionSerializer
class LogView(generics.RetrieveAPIView):
queryset = LogEntry.objects.all()
serializer_class = LogSerializer
lookup_field = 'execution__id'
class ReferenceDescriptorList(generics.ListAPIView):
queryset = ReferenceDescriptor.objects.all()
serializer_class = ReferenceDescriptorListSerializer
class ReferenceSearchFilter(SearchFilter):
def filter_queryset(self, request, queryset, view):
search_terms = self.get_search_terms(request)
if not search_terms:
return queryset
reference_descriptor = view.get_reference_descriptor()
search_fields = reference_descriptor.get_search_fields()
if not search_fields:
raise exceptions.ValidationError(
                'ReferenceDescriptor for `{}` is not configured: incorrect `search_fields` field'.format(
view.get_reference_model_name()))
orm_lookups = [self.construct_search(six.text_type(search_field)) for search_field in search_fields]
base = queryset
for search_term in search_terms:
queries = [models.Q(**{orm_lookup: search_term}) for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, queries))
if self.must_call_distinct(queryset, search_fields):
# Filtering against a many-to-many field requires us to
# call queryset.distinct() in order to avoid duplicate items
# in the resulting queryset.
# We try to avoid this if possible, for performance reasons.
queryset = distinct(queryset, base)
return queryset
class ReferenceBaseView(object):
serializer_class = ReferenceSerializer
def get_queryset(self):
try:
self.get_reference_descriptor()
except ReferenceDescriptor.DoesNotExist:
raise exceptions.NotFound()
return self.get_reference_model().objects.all()
def get_reference_descriptor(self):
return ReferenceDescriptor.objects.get(
content_type=ContentType.objects.get_for_model(self.get_reference_model()))
def get_reference_model_name(self):
return self.kwargs['model']
def get_reference_model(self):
try:
app_name, model_name = self.get_reference_model_name().split('.')
model = get_model(app_name, model_name)
except (ValueError, LookupError):
raise exceptions.NotFound()
return model
class ReferenceList(ReferenceBaseView, generics.ListAPIView):
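    # e.g. GET .../reference/myapp.Book/?search=twain resolves `myapp.Book`
    # via get_model('myapp', 'Book') and searches the fields configured on
    # its ReferenceDescriptor ('myapp.Book' is a hypothetical label).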
pagination_class = StandardResultsSetPagination
filter_backends = [ReferenceSearchFilter]
class ReferenceView(ReferenceBaseView, generics.RetrieveAPIView):
serializer_class = ReferenceSerializer
| dgk/django-business-logic | business_logic/rest/views.py | Python | mit | 6,217 |
from tests.test_1 import test_1
from tests.test_2 import test_2
from tests.test_3 import test_3
from tests.test_4 import test_4
from tests.test_5 import test_5
from tests.test_6 import test_6
from tests.test_7 import test_7
from tests.test_8 import test_8
from tests.test_9 import test_9
from tests.test_10 import test_10
from tests.test_11 import test_11
from tests.test_12 import test_12
from tests.test_13 import test_13
from tests.test_14 import test_14
from tests.test_15 import test_15
from tests.test_16 import test_16
from tests.test_17 import test_17
from tests.test_18 import test_18
from tests.test_19 import test_19
def test_function_list():
return [test_1, test_2, test_3, test_4, test_5, test_6, test_7,
test_8, test_9, test_10, test_11, test_12, test_13,
test_14, test_15, test_16, test_17, test_18, test_19]
| qutip/qutip-benchmark | benchmark/tests/__init__.py | Python | bsd-3-clause | 853 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
from edb.testbase import server as tb
class TestEdgeQLCoalesce(tb.QueryTestCase):
"""The test DB is designed to test various coalescing operations.
"""
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'issues.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'issues_coalesce_setup.edgeql')
async def test_edgeql_coalesce_scalar_01(self):
await self.assert_query_result(
r'''
SELECT Issue {
time_estimate := Issue.time_estimate ?? -1
};
''',
[
{'time_estimate': -1},
{'time_estimate': -1},
{'time_estimate': -1},
{'time_estimate': 60},
{'time_estimate': 90},
{'time_estimate': 90},
],
sort=lambda x: x['time_estimate']
)
async def test_edgeql_coalesce_scalar_02(self):
await self.assert_query_result(
r'''
SELECT (Issue.number, Issue.time_estimate ?? -1)
ORDER BY Issue.number;
''',
[
['1', 60],
['2', 90],
['3', 90],
['4', -1],
['5', -1],
['6', -1],
]
)
async def test_edgeql_coalesce_scalar_03(self):
await self.assert_query_result(
r'''
# Only values present in the graph will be selected.
# There is at least one value there.
# Therefore, the second argument to ?? will not be returned.
SELECT Issue.time_estimate ?? -1;
''',
[
60,
90,
90,
],
sort=True
)
async def test_edgeql_coalesce_scalar_04(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?? is an empty set.
# Therefore, the second argument to ?? will be returned.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?? -1;
''',
[
-1,
]
)
async def test_edgeql_coalesce_scalar_05(self):
await self.assert_query_result(
r'''
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
# No open issue has a time_estimate, so the first argument
# to ?? is an empty set.
# Therefore, the second argument to ?? will be returned.
SELECT I.time_estimate ?? -1;
''',
[
-1
]
)
async def test_edgeql_coalesce_scalar_06(self):
# The result is either an empty set if at least one
# estimate exists, or `-1` if no estimates exist.
        # Our database contains three estimates.
await self.assert_query_result(
r"""
SELECT Issue.time_estimate ?? -1
FILTER NOT EXISTS Issue.time_estimate;
""",
[]
)
async def test_edgeql_coalesce_scalar_07(self):
await self.assert_query_result(
r'''
SELECT Issue {
number,
has_estimate := Issue.time_estimate ?!= <int64>{}
};
''',
[
{'number': '1', 'has_estimate': True},
{'number': '2', 'has_estimate': True},
{'number': '3', 'has_estimate': True},
{'number': '4', 'has_estimate': False},
{'number': '5', 'has_estimate': False},
{'number': '6', 'has_estimate': False},
],
sort=lambda x: x['number']
)
async def test_edgeql_coalesce_scalar_08(self):
await self.assert_query_result(
r'''
SELECT (Issue.number, Issue.time_estimate ?= 60)
ORDER BY Issue.number;
''',
[
['1', True],
['2', False],
['3', False],
['4', False],
['5', False],
['6', False],
]
)
async def test_edgeql_coalesce_scalar_09(self):
await self.assert_query_result(
r'''
# Only values present in the graph will be selected.
SELECT Issue.time_estimate ?= 60;
''',
[
False, False, True,
],
sort=True
)
await self.assert_query_result(
r'''
SELECT Issue.time_estimate ?= <int64>{};
''',
[
False, False, False,
],
sort=True
)
async def test_edgeql_coalesce_scalar_10(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?= is an empty set.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?= <int64>{};
''',
[
True,
]
)
async def test_edgeql_coalesce_scalar_11(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?!= is an empty set.
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
SELECT I.time_estimate ?!= <int64>{};
''',
[
False
]
)
await self.assert_query_result(
r'''
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
SELECT I.time_estimate ?!= 60;
''',
[
True
]
)
async def test_edgeql_coalesce_scalar_12(self):
await self.assert_query_result(
r'''
SELECT Issue {
number,
time_estimate,
related_to: {time_estimate},
}
ORDER BY Issue.number;
''',
[
{'number': '1', 'related_to': [], 'time_estimate': 60},
{'number': '2', 'related_to': [], 'time_estimate': 90},
{'number': '3', 'related_to': [], 'time_estimate': 90},
{'number': '4', 'related_to': [], 'time_estimate': None},
{
'number': '5',
'related_to': [{'time_estimate': 60}],
'time_estimate': None,
},
{
'number': '6',
'related_to': [{'time_estimate': 90}],
'time_estimate': None,
},
]
)
await self.assert_query_result(
r'''
# now test a combination of several coalescing operators
SELECT
Issue.time_estimate ??
Issue.related_to.time_estimate ?=
<int64>Issue.number * 12
ORDER BY Issue.number;
''',
[
False, False, False, False, True, False,
]
)
async def test_edgeql_coalesce_set_01(self):
await self.assert_query_result(
r'''
SELECT Issue {
comp_time_estimate := Issue.time_estimate ?? {-1, -2}
};
''',
[
{'comp_time_estimate': [-1, -2]},
{'comp_time_estimate': [-1, -2]},
{'comp_time_estimate': [-1, -2]},
{'comp_time_estimate': [60]},
{'comp_time_estimate': [90]},
{'comp_time_estimate': [90]},
],
sort=lambda x: x['comp_time_estimate']
)
async def test_edgeql_coalesce_set_02(self):
await self.assert_query_result(
r'''
SELECT Issue {
multi te := (
SELECT Issue.time_estimate ?? {-1, -2}
)
};
''',
[
{'te': [-1, -2]},
{'te': [-1, -2]},
{'te': [-1, -2]},
{'te': [60]},
{'te': [90]},
{'te': [90]},
],
sort=lambda x: x['te']
)
async def test_edgeql_coalesce_set_03(self):
await self.assert_query_result(
r'''
SELECT _ := (Issue.number, Issue.time_estimate ?? {-1, -2})
ORDER BY _;
''',
[
['1', 60],
['2', 90],
['3', 90],
['4', -2],
['4', -1],
['5', -2],
['5', -1],
['6', -2],
['6', -1],
],
)
async def test_edgeql_coalesce_set_04(self):
await self.assert_query_result(
r'''
# Only values present in the graph will be selected.
# There is at least one value there.
# Therefore, the second argument to ?? will not be returned.
SELECT Issue.time_estimate ?? {-1, -2};
''',
[
60,
90,
90,
],
sort=True
)
async def test_edgeql_coalesce_set_05(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?? is an empty set.
# Therefore, the second argument to ?? will be returned.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?? {-1, -2};
''',
{
-1, -2,
},
)
async def test_edgeql_coalesce_set_06(self):
await self.assert_query_result(
r'''
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
# No open issue has a time_estimate, so the first argument
# to ?? is an empty set.
# Therefore, the second argument to ?? will be returned.
SELECT I.time_estimate ?? {-1, -2};
''',
{
-1, -2,
},
)
async def test_edgeql_coalesce_set_07(self):
await self.assert_query_result(
r'''
SELECT Issue {
number,
te := Issue.time_estimate ?= {60, 30}
};
''',
[
{'number': '1', 'te': {True, False}},
{'number': '2', 'te': [False, False]},
{'number': '3', 'te': [False, False]},
{'number': '4', 'te': [False, False]},
{'number': '5', 'te': [False, False]},
{'number': '6', 'te': [False, False]},
],
sort=lambda x: x['number']
)
async def test_edgeql_coalesce_set_08(self):
await self.assert_query_result(
r'''
SELECT _ := (Issue.number, Issue.time_estimate ?= {60, 90})
ORDER BY _;
''',
[
['1', False],
['1', True],
['2', False],
['2', True],
['3', False],
['3', True],
['4', False],
['4', False],
['5', False],
['5', False],
['6', False],
['6', False],
],
)
async def test_edgeql_coalesce_set_09(self):
await self.assert_query_result(
r'''
# Only values present in the graph will be selected.
SELECT Issue.time_estimate ?= {60, 30};
''',
[
False,
False,
False,
False,
False,
True,
],
sort=True
)
async def test_edgeql_coalesce_set_10(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?!= is an empty set.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?!= {-1, -2};
''',
[
True, True,
],
)
async def test_edgeql_coalesce_set_11(self):
await self.assert_query_result(
r'''
# No open issue has a time_estimate, so the first argument
# to ?= is an empty set.
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
SELECT I.time_estimate ?= {-1, -2};
''',
[
False, False,
],
)
async def test_edgeql_coalesce_dependent_01(self):
await self.assert_query_result(
r'''
SELECT Issue {
# for every issue, there's a unique derived "default"
# to use with ??
time_estimate :=
Issue.time_estimate ?? -<int64>Issue.number
} ORDER BY Issue.time_estimate;
''',
[
{'time_estimate': -6},
{'time_estimate': -5},
{'time_estimate': -4},
{'time_estimate': 60},
{'time_estimate': 90},
{'time_estimate': 90},
],
)
async def test_edgeql_coalesce_dependent_02(self):
await self.assert_query_result(
r'''
# for every issue, there's a unique derived "default" to use
# with ??
SELECT (Issue.number,
Issue.time_estimate ?? -<int64>Issue.number)
ORDER BY Issue.number;
''',
[
['1', 60],
['2', 90],
['3', 90],
['4', -4],
['5', -5],
['6', -6],
],
)
async def test_edgeql_coalesce_dependent_03(self):
await self.assert_query_result(
r'''
# ?? is OPTIONAL w.r.t. first argument, so it behaves like
# an element-wise function. Therefore, the longest common
# prefix `Issue` is factored out and the expression is
# evaluated for every Issue.
SELECT Issue.time_estimate ?? -<int64>Issue.number;
''',
[
-6, -5, -4, 60, 90, 90,
],
sort=True
)
async def test_edgeql_coalesce_dependent_04(self):
await self.assert_query_result(
r'''
# Since ?? is OPTIONAL over it's first argument,
# the expression is evaluated for all six issues.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?? -<int64>Issue.number;
''',
[
-6, -5, -4, -3, -2, -1
],
sort=True
)
async def test_edgeql_coalesce_dependent_05(self):
await self.assert_query_result(
r'''
# Unlike the above test, we refer to the
# same "open" subset of issues on both
# sides of ??, so the result set contains
# only three elements.
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
SELECT I.time_estimate ?? -<int64>I.number;
''',
[
-6, -5, -4,
],
sort=True
)
async def test_edgeql_coalesce_dependent_06(self):
await self.assert_query_result(
r'''
WITH
I2 := Issue
# ?? is OPTIONAL w.r.t. first argument, so it behaves like
# an element-wise function. However, since there is no
# common prefix, the expression gets evaluated ONLY for
# existing values of `Issue.time_estimate`.
SELECT Issue.time_estimate ?? -<int64>I2.number;
''',
[
60, 90, 90,
],
sort=True
)
async def test_edgeql_coalesce_dependent_07(self):
await self.assert_query_result(
r'''
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?? -<int64>Issue.number;
''',
[
-6, -5, -4, -3, -2, -1,
],
sort=True
)
async def test_edgeql_coalesce_dependent_08(self):
await self.assert_query_result(
r'''
# On one hand the right operand of ?? is not independent
# of the left. On the other hand, it is constructed in
# such a way as to be equivalent to literal `-1` for the
# case when its value is important.
#
# LCP is `Issue.time_estimate`, so this should not
# actually be evaluated for every `Issue`, but for every
# `Issue.time_estimate`.
SELECT Issue.time_estimate ?? {Issue.time_estimate, -1};
''',
[
60, 90, 90,
],
sort=True
)
async def test_edgeql_coalesce_dependent_09(self):
await self.assert_query_result(
r'''
# `Issue` on both sides is behind a fence, so the left-hand
# expression is an empty set, and the result is a union
# of all existing time estimates and -1.
SELECT _ := (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?? {Issue.time_estimate, -1}
ORDER BY _;
''',
[
-1, 60, 90, 90
],
)
async def test_edgeql_coalesce_dependent_10(self):
await self.assert_query_result(
r'''
WITH
I := (
SELECT Issue
FILTER Issue.status.name = 'Open'
)
# `I.time_estimate` is now a LCP
SELECT I.time_estimate ?? {I.time_estimate, -1};
''',
[
-1,
],
)
async def test_edgeql_coalesce_dependent_11(self):
await self.assert_query_result(
r'''
SELECT Issue {
number,
foo := Issue.time_estimate ?= <int64>Issue.number * 30
} ORDER BY Issue.number;
''',
[
{'number': '1', 'foo': False},
{'number': '2', 'foo': False},
{'number': '3', 'foo': True},
{'number': '4', 'foo': False},
{'number': '5', 'foo': False},
{'number': '6', 'foo': False},
],
)
async def test_edgeql_coalesce_dependent_12(self):
await self.assert_query_result(
r'''
SELECT (
Issue.number,
Issue.time_estimate ?!= <int64>Issue.number * 30
)
ORDER BY Issue.number;
''',
[
['1', True],
['2', True],
['3', False],
['4', True],
['5', True],
['6', True],
],
)
async def test_edgeql_coalesce_dependent_13(self):
await self.assert_query_result(
r'''
# ?= is OPTIONAL w.r.t. both arguments, so it behaves like
# an element-wise function. Therefore, the longest common
# prefix `Issue` is factored out and the expression is
# evaluated for every Issue.
SELECT Issue.time_estimate ?= <int64>Issue.number * 30;
''',
[
False,
False,
False,
False,
False,
True,
],
sort=True
)
async def test_edgeql_coalesce_dependent_14(self):
await self.assert_query_result(
r'''
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?= <int64>Issue.number;
''',
[
False, False, False, False, False, False
],
sort=True
)
async def test_edgeql_coalesce_dependent_15(self):
await self.assert_query_result(
r'''
WITH
I := (SELECT Issue
FILTER Issue.status.name = 'Open')
# Same as dependent_13, but only 'Open' issues
# being considered.
SELECT I.time_estimate ?!= I.time_spent_log.spent_time;
''',
[
False, False, False,
],
sort=True
)
async def test_edgeql_coalesce_dependent_16(self):
await self.assert_query_result(
r'''
WITH
I2 := Issue
# ?= is OPTIONAL w.r.t. both arguments, so it behaves like
# an element-wise function. However, since there is no
# common prefix, the expression gets evaluated ONLY for
# existing values of `Issue.time_estimate`, so the cardinality
# of the result set is 18 (3 * 6).
SELECT Issue.time_estimate ?= <int64>I2.number * 30;
''',
[
False, False, False,
False, False, False,
False, False, False,
False, False, False,
False, False, False,
True, True, True,
],
sort=True
)
async def test_edgeql_coalesce_dependent_17(self):
await self.assert_query_result(
r'''
WITH
I2 := Issue
# ?!= is OPTIONAL w.r.t. both arguments, so it behaves like
# an element-wise function. However, since there is no
# common prefix, the expression gets evaluated ONLY for
# existing values of `Issue.time_estimate`, where
# `Issue.status` is 'Open', which happens to be an empty set,
# but ?!= is OPTIONAL, so the cardinality of the result set is
# 1 * |I.number| == 6.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?!= <int64>I2.number * 30;
''',
[
True, True, True,
True, True, True,
],
sort=True
)
async def test_edgeql_coalesce_dependent_18(self):
await self.assert_query_result(
r'''
# LCP is `Issue.time_estimate`, so this should not
# actually be evaluated for every `Issue`, but for every
# `Issue.time_estimate`.
SELECT Issue.time_estimate ?= Issue.time_estimate * 2;
''',
[
False, False, False,
],
sort=True
)
async def test_edgeql_coalesce_dependent_19(self):
await self.assert_query_result(
r'''
# `Issue` is now a LCP and the overall expression will be
# evaluated for every `Issue`.
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_estimate ?= Issue.time_estimate * 2;
''',
[
False, False, False, True, True, True,
],
sort=True
)
async def test_edgeql_coalesce_dependent_20(self):
await self.assert_query_result(
r'''
WITH
I := (
SELECT Issue
FILTER Issue.status.name = 'Open'
)
# `I.time_estimate` is now a LCP
SELECT I.time_estimate ?= I.time_estimate * 2;
''',
[
True,
],
)
async def test_edgeql_coalesce_dependent_21(self):
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X[IS Priority].name ?? X[IS Status].name;
''',
{'High', 'Low', 'Open', 'Closed'},
)
async def test_edgeql_coalesce_dependent_22(self):
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X[IS Priority].name[0] ?? X[IS Status].name;
''',
{'H', 'L', 'Open', 'Closed'},
)
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X[IS Priority].name ?? X[IS Status].name[0];
''',
{'High', 'Low', 'O', 'C'},
)
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X[IS Priority].name[0] ?? X[IS Status].name[0];
''',
{'H', 'L', 'O', 'C'},
)
async def test_edgeql_coalesce_dependent_23(self):
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X {
foo := X[IS Priority].name ?? X[IS Status].name
};
''',
[
{'foo': 'Closed'},
{'foo': 'High'},
{'foo': 'Low'},
{'foo': 'Open'}
],
sort=lambda x: x['foo']
)
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X {
foo := X[IS Priority].name[0] ?? X[IS Status].name
};
''',
[
{'foo': 'Closed'},
{'foo': 'H'},
{'foo': 'L'},
{'foo': 'Open'}
],
sort=lambda x: x['foo']
)
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X {
foo := X[IS Priority].name ?? X[IS Status].name[0]
};
''',
[
{'foo': 'C'},
{'foo': 'High'},
{'foo': 'Low'},
{'foo': 'O'}
],
sort=lambda x: x['foo']
)
await self.assert_query_result(
r'''
WITH
X := {Priority, Status}
SELECT X {
foo := X[IS Priority].name[0] ?? X[IS Status].name[0]
};
''',
[
{'foo': 'C'},
{'foo': 'H'},
{'foo': 'L'},
{'foo': 'O'}
],
sort=lambda x: x['foo']
)
async def test_edgeql_coalesce_object_01(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy')
SELECT Issue {
number,
time_spent_log := (
SELECT x := (Issue.time_spent_log ?? DUMMY) {
id,
spent_time
}
ORDER BY x.spent_time
)
} ORDER BY Issue.number;
''',
[
{
'number': '1',
'time_spent_log': [{
'spent_time': 60,
}],
}, {
'number': '2',
'time_spent_log': [{
'spent_time': 90,
}],
}, {
'number': '3',
'time_spent_log': [{
'spent_time': 30,
}, {
'spent_time': 60,
}],
}, {
'number': '4',
'time_spent_log': [{
'spent_time': -1,
}],
}, {
'number': '5',
'time_spent_log': [{
'spent_time': -1,
}],
}, {
'number': '6',
'time_spent_log': [{
'spent_time': -1,
}],
},
],
)
async def test_edgeql_coalesce_object_02(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy')
SELECT x := (
Issue.number,
(Issue.time_spent_log ?? DUMMY).spent_time
) ORDER BY x.0 THEN x.1;
''',
[
['1', 60],
['2', 90],
['3', 30],
['3', 60],
['4', -1],
['5', -1],
['6', -1],
],
)
async def test_edgeql_coalesce_object_03(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy')
SELECT x := (Issue.time_spent_log ?? DUMMY) {
spent_time
}
ORDER BY x.spent_time;
''',
[
{'spent_time': 30},
{'spent_time': 60},
{'spent_time': 60},
{'spent_time': 90},
],
sort=lambda x: x['spent_time']
)
async def test_edgeql_coalesce_object_04(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy')
SELECT (
(SELECT Issue
FILTER Issue.status.name = 'Open').time_spent_log
??
DUMMY
) {
id,
spent_time
};
''',
[
{'spent_time': -1},
],
)
async def test_edgeql_coalesce_object_05(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy'),
I := (
SELECT Issue
FILTER Issue.status.name = 'Open'
)
SELECT (I.time_spent_log ?? DUMMY) {
id,
spent_time
};
''',
[
{'spent_time': -1},
],
)
async def test_edgeql_coalesce_object_06(self):
await self.assert_query_result(
r'''
WITH
LOG1 := (SELECT LogEntry FILTER LogEntry.body = 'Log1')
SELECT Issue {
number,
log1 := Issue.time_spent_log ?= LOG1
} ORDER BY Issue.number;
''',
[
{
'number': '1',
'log1': [True],
}, {
'number': '2',
'log1': [False],
}, {
'number': '3',
'log1': [False, False]
}, {
'number': '4',
'log1': [False],
}, {
'number': '5',
'log1': [False],
}, {
'number': '6',
'log1': [False],
},
],
)
async def test_edgeql_coalesce_object_07(self):
await self.assert_query_result(
r'''
WITH
LOG1 := (SELECT LogEntry FILTER LogEntry.body = 'Log1')
SELECT (
Issue.number, Issue.time_spent_log ?= LOG1
) ORDER BY Issue.number;
''',
[
['1', True],
['2', False],
['3', False],
['3', False],
['4', False],
['5', False],
['6', False],
],
)
async def test_edgeql_coalesce_object_08(self):
await self.assert_query_result(
r'''
WITH
LOG1 := (SELECT LogEntry FILTER LogEntry.body = 'Log1')
SELECT Issue.time_spent_log ?!= LOG1;
''',
[
False,
True,
True,
True,
],
sort=True
)
async def test_edgeql_coalesce_object_09(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy')
SELECT (
SELECT Issue
FILTER Issue.status.name = 'Open'
).time_spent_log ?= DUMMY;
''',
[
False,
],
)
async def test_edgeql_coalesce_object_10(self):
await self.assert_query_result(
r'''
WITH
DUMMY := (SELECT LogEntry FILTER LogEntry.body = 'Dummy'),
I := (
SELECT Issue
FILTER Issue.status.name = 'Open'
)
SELECT I.time_spent_log ?!= DUMMY;
''',
[
True,
],
)
async def test_edgeql_coalesce_object_11(self):
await self.assert_query_result(
r'''
SELECT
(
(SELECT Issue FILTER .number = '1')
??
(SELECT Issue FILTER .number = '2')
) {
number
}
''',
[{
'number': '1',
}]
)
async def test_edgeql_coalesce_object_12(self):
await self.assert_query_result(
r'''
SELECT
(
(SELECT Issue FILTER .number = '100')
??
(SELECT Issue FILTER .number = '2')
) {
number
}
''',
[{
'number': '2',
}]
)
async def test_edgeql_coalesce_wrapping_optional(self):
await self.con.execute(
r'''
CREATE FUNCTION optfunc(
a: std::str, b: OPTIONAL std::str) -> std::str
USING EdgeQL $$
SELECT b IF a = 'foo' ELSE a
$$;
'''
)
await self.assert_query_result(
r'''
SELECT optfunc('foo', <str>{}) ?? 'N/A';
''',
['N/A'],
)
await self.assert_query_result(
r'''
SELECT optfunc('foo', 'b') ?? 'N/A';
''',
['b'],
)
await self.assert_query_result(
r'''
SELECT optfunc('a', <str>{}) ?? 'N/A';
''',
['a'],
)
async def test_edgeql_coalesce_set_of_01(self):
await self.assert_query_result(
r'''
SELECT <str>Publication.id ?? <str>count(Publication)
''',
['0'],
)
async def test_edgeql_coalesce_set_of_02(self):
await self.assert_query_result(
r'''
SELECT Publication.title ?? <str>count(Publication)
''',
['0'],
)
async def test_edgeql_coalesce_set_of_03(self):
await self.assert_query_result(
r'''
SELECT <str>Publication.id ?= <str>count(Publication)
''',
[False],
)
async def test_edgeql_coalesce_set_of_04(self):
await self.assert_query_result(
r'''
SELECT Publication.title ?= <str>count(Publication)
''',
[False],
)
async def test_edgeql_coalesce_set_of_05(self):
await self.assert_query_result(
r'''
SELECT (Publication.title ?? <str>count(Publication))
?? Publication.title
''',
['0'],
)
async def test_edgeql_coalesce_set_of_06(self):
await self.assert_query_result(
r'''
SELECT (Publication.title ?= <str>count(Publication),
Publication)
''',
[],
)
async def test_edgeql_coalesce_set_of_07(self):
await self.assert_query_result(
r'''
SELECT (Publication.title ?= '0',
(Publication.title ?? <str>count(Publication)));
''',
[[False, '0']],
)
async def test_edgeql_coalesce_set_of_08(self):
await self.assert_query_result(
r'''
SELECT ("1" if Publication.title ?= "foo" else "2") ++
(Publication.title ?? <str>count(Publication))
''',
['20'],
)
async def test_edgeql_coalesce_set_of_09(self):
await self.assert_query_result(
r'''
SELECT (Publication.title ?= "Foo", Publication.title ?= "bar")
''',
[[False, False]],
)
async def test_edgeql_coalesce_set_of_10(self):
await self.assert_query_result(
r'''
SELECT (Publication.title++Publication.title ?= "Foo",
Publication.title ?= "bar")
''',
[[False, False]],
)
async def test_edgeql_coalesce_set_of_11(self):
await self.assert_query_result(
r'''
SELECT (Publication.title ?= "", count(Publication))
''',
[[False, 0]],
)
await self.assert_query_result(
r'''
SELECT (count(Publication), Publication.title ?= "")
''',
            [[0, False]],
)
async def test_edgeql_coalesce_set_of_12(self):
await self.assert_query_result(
r'''
SELECT (
Publication ?= Publication,
(Publication.title++Publication.title
?= Publication.title) ?=
(Publication ?!= Publication)
)
''',
[[True, False]]
)
async def test_edgeql_coalesce_set_of_13(self):
await self.assert_query_result(
r'''
SELECT (Publication ?= Publication, Publication)
''',
[],
)
async def test_edgeql_coalesce_set_of_nonempty_01(self):
await self.con.execute(
'''INSERT Publication { title := "1" }''')
await self.con.execute(
'''INSERT Publication { title := "asdf" }''')
await self.assert_query_result(
r'''
SELECT Publication.title ?= <str>count(Publication)
''',
[True, False],
)
async def test_edgeql_coalesce_self_01(self):
await self.assert_query_result(
r'''
SELECT Publication ?? Publication
''',
[],
)
async def test_edgeql_coalesce_self_02(self):
await self.assert_query_result(
r'''
WITH Z := (SELECT Comment FILTER .owner.name = "Yury")
SELECT (Z.parent ?? Z);
''',
[],
)
async def test_edgeql_coalesce_pointless_01(self):
# This is pointless but it should work.
await self.assert_query_result(
r'''
SELECT 'a' ?? (SELECT {'a', 'b'})
''',
["a"],
)
async def test_edgeql_coalesce_correlation_01(self):
await self.assert_query_result(
r'''
SELECT _ := (
SELECT (Issue.name ++ <str>Issue.time_estimate)) ?? 'n/a'
ORDER BY _;
''',
["Issue 160", "Issue 290", "Issue 390"],
)
async def test_edgeql_coalesce_correlation_02(self):
await self.assert_query_result(
r'''
WITH X := (SELECT (Issue.name ++ <str>Issue.time_estimate)),
SELECT _ := X ?? 'n/a'
ORDER BY _;
''',
["Issue 160", "Issue 290", "Issue 390"],
)
async def test_edgeql_coalesce_correlation_03(self):
# TODO: add this to the schema if we want more like it
await self.con.execute('''
CREATE FUNCTION opts(x: OPTIONAL str) -> str { USING (x) };
''')
await self.assert_query_result(
r'''
SELECT _ := (
count(Issue),
opts((SELECT (<str>Issue.time_estimate))),
) ORDER BY _;
''',
[[6, "60"], [6, "90"], [6, "90"]],
)
async def test_edgeql_coalesce_tuple_01(self):
await self.assert_query_result(
r'''
SELECT (SELECT ('no', 'no') FILTER false) ?? ('a', 'b');
''',
[
['a', 'b'],
]
)
async def test_edgeql_coalesce_tuple_02(self):
await self.assert_query_result(
r'''
SELECT _ := (Issue.name, (Issue.name, <str>Issue.time_estimate)
?? ('hm', 'n/a')) ORDER BY _;
''',
[
["Issue 1", ["Issue 1", "60"]],
["Issue 2", ["Issue 2", "90"]],
["Issue 3", ["Issue 3", "90"]],
["Issue 4", ["hm", "n/a"]],
["Issue 5", ["hm", "n/a"]],
["Issue 6", ["hm", "n/a"]],
]
)
async def test_edgeql_coalesce_tuple_03(self):
await self.assert_query_result(
r'''
SELECT _ := (Issue.name, (Issue.name, Issue.time_estimate)
?? (Issue.name, -1)) ORDER BY _;
''',
[
["Issue 1", ["Issue 1", 60]],
["Issue 2", ["Issue 2", 90]],
["Issue 3", ["Issue 3", 90]],
["Issue 4", ["Issue 4", -1]],
["Issue 5", ["Issue 5", -1]],
["Issue 6", ["Issue 6", -1]],
]
)
async def test_edgeql_coalesce_tuple_04(self):
await self.assert_query_result(
r'''
SELECT _ := (Issue.name, Issue.time_estimate)
?? (Issue.name, -1) ORDER BY _;
''',
[
["Issue 1", 60],
["Issue 2", 90],
["Issue 3", 90],
["Issue 4", -1],
["Issue 5", -1],
["Issue 6", -1],
],
)
async def test_edgeql_coalesce_tuple_05(self):
await self.assert_query_result(
r'''
WITH X := (Issue.name, Issue.time_estimate),
SELECT _ := X ?? ('hm', -1) ORDER BY _;
''',
[
["Issue 1", 60],
["Issue 2", 90],
["Issue 3", 90],
],
)
async def test_edgeql_coalesce_tuple_06(self):
await self.assert_query_result(
r'''
SELECT (SELECT ((), 'no') FILTER false) ?? ((), 'b');
''',
[
[[], 'b'],
],
)
async def test_edgeql_coalesce_tuple_07(self):
await self.assert_query_result(
r'''
SELECT (SELECT () FILTER false) ?? {(), ()};
''',
[
[], []
],
)
await self.assert_query_result(
r'''
SELECT (SELECT () FILTER true) ?? {(), ()};
''',
[
[]
],
)
await self.assert_query_result(
r'''
SELECT (SELECT ((), ()) FILTER true) ?? {((), ()), ((), ())}
''',
[
[[], []]
],
)
async def test_edgeql_coalesce_tuple_08(self):
await self.con.execute('''
CREATE TYPE Foo {
CREATE PROPERTY bar -> tuple<int64, int64>;
CREATE PROPERTY baz -> tuple<tuple<int64, int64>, str>;
};
''')
await self.assert_query_result(
r'''
SELECT Foo.bar ?? (1, 2)
''',
[[1, 2]],
)
await self.assert_query_result(
r'''
SELECT Foo.bar UNION (1, 2)
''',
[[1, 2]],
)
await self.assert_query_result(
r'''
SELECT (Foo.bar ?? (1, 2)).0
''',
[1],
)
await self.assert_query_result(
r'''
SELECT (Foo.bar UNION (1, 2)).0
''',
[1],
)
await self.assert_query_result(
r'''
SELECT (Foo.baz ?? ((1, 2), 'huh')).0.1
''',
[2],
)
# Insert some data and mess around some more
await self.con.execute('''
INSERT Foo { bar := (3, 4), baz := ((3, 4), 'test') }
''')
await self.assert_query_result(
r'''
SELECT ([Foo.bar], array_agg(Foo.bar));
''',
[[[[3, 4]], [[3, 4]]]],
)
await self.assert_query_result(
r'''
SELECT Foo.bar ?? (1, 2)
''',
[[3, 4]],
)
await self.assert_query_result(
r'''
SELECT _ := Foo.bar UNION (1, 2) ORDER BY _;
''',
[[1, 2], [3, 4]],
)
await self.assert_query_result(
r'''
SELECT (Foo.bar ?? (1, 2)).1
''',
[4],
)
await self.assert_query_result(
r'''
SELECT _ := (Foo.bar UNION (1, 2)).0 ORDER BY _;
''',
[1, 3],
)
await self.assert_query_result(
r'''
SELECT (Foo.baz ?? ((1, 2), 'huh')).0.1
''',
[4],
)
await self.assert_query_result(
r'''
WITH W := (Foo.baz UNION ((1, 2), 'huh')),
SELECT (W, W.1, W.0.0) ORDER BY W;
''',
[
[[[1, 2], "huh"], "huh", 1],
[[[3, 4], "test"], "test", 3],
],
)
async def test_edgeql_coalesce_tuple_09(self):
await self.assert_query_result(
r'''
SELECT _ := ([(1,2)][0] UNION (3,4)).1 ORDER BY _;
''',
[2, 4],
)
async def test_edgeql_coalesce_overload_01(self):
# first argument bool -> optional second arg
await self.assert_query_result(
r'''
SELECT Issue.name ++ opt_test(false, <str>Issue.time_estimate)
''',
{
"Issue 160", "Issue 290", "Issue 390",
"Issue 4", "Issue 5", "Issue 6",
},
)
await self.assert_query_result(
r'''
SELECT (Issue.name, opt_test(false, Issue.time_estimate))
''',
{
("Issue 1", 60),
("Issue 2", 90),
("Issue 3", 90),
("Issue 4", -1),
("Issue 5", -1),
("Issue 6", -1),
},
)
await self.assert_query_result(
r'''
SELECT opt_test(true, <str>Issue.time_estimate)
''',
tb.bag(["60", "90", "90"]),
)
await self.assert_query_result(
r'''
SELECT opt_test(true, Issue.time_estimate)
''',
tb.bag([60, 90, 90]),
)
await self.assert_query_result(
r'''
select Issue { z := opt_test(true, .time_estimate) }
''',
tb.bag([
{"z": 60}, {"z": 90}, {"z": 90},
{"z": -1}, {"z": -1}, {"z": -1}
]),
)
await self.assert_query_result(
r'''
select Issue { z := opt_test(true, .time_estimate, 1) }
''',
tb.bag([
{"z": 1}, {"z": 1}, {"z": 1},
{"z": 1}, {"z": 1}, {"z": 1},
]),
)
async def test_edgeql_coalesce_overload_02(self):
# first argument int -> singleton second arg
await self.assert_query_result(
r'''
SELECT Issue.name ++ opt_test(0, <str>Issue.time_estimate)
''',
{
"Issue 160", "Issue 290", "Issue 390",
},
)
await self.assert_query_result(
r'''
SELECT (Issue.name, opt_test(0, Issue.time_estimate))
''',
{
("Issue 1", 60),
("Issue 2", 90),
("Issue 3", 90),
},
)
await self.assert_query_result(
r'''
SELECT opt_test(0, <str>Issue.time_estimate)
''',
tb.bag(["60", "90", "90"]),
)
await self.assert_query_result(
r'''
SELECT opt_test(0, Issue.time_estimate)
''',
tb.bag([60, 90, 90]),
)
await self.assert_query_result(
r'''
select Issue { z := opt_test(0, .time_estimate) }
''',
tb.bag([
{"z": 60}, {"z": 90}, {"z": 90},
{"z": None}, {"z": None}, {"z": None}
]),
)
await self.assert_query_result(
r'''
select Issue { z := opt_test(0, .time_estimate, 1) }
''',
tb.bag([
{"z": 1}, {"z": 1}, {"z": 1},
{"z": None}, {"z": None}, {"z": None}
]),
)
| edgedb/edgedb | tests/test_edgeql_coalesce.py | Python | apache-2.0 | 53,344 |
import turtle
def fern(size, pensize):
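    """Draw a fern frond recursively: a stem segment with a smaller frond
    to the right at its tip and another to the left halfway down; recursion
    stops once the pen size drops below 1."""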
if pensize < 1:
return
turtle.pensize(pensize)
turtle.forward(size)
turtle.right(55)
fern(2*size/3, pensize-1)
turtle.left(55)
turtle.back(size/2)
turtle.left(60)
fern(size/2, pensize-1)
turtle.right(60)
turtle.back(size/2)
turtle.speed("fastest")
turtle.penup()
turtle.goto(-100, -400)
turtle.pendown()
turtle.left(80)
turtle.color("green")
fern(500.0, 10)
| chrisglencross/python-lessons | misc/fern.py | Python | mit | 461 |
"""
Load npy xy, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
lon_max = 100
lon_min = 80
lat_max= 25
lat_min=10
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bay_of_bengal.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
experiment_ids_p = [ 'dkjxq', 'djznq' ] # Most of Params
experiment_ids_e = ['dkhgu', 'dkbhu'] # Most of Explicit
#experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#plt.ion()
NUM_COLOURS = 15
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
for ls in ['sea']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
legendEntries=[]
legendtext=[]
plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
dates_trmm=[]
p=[]
for dp in plot_trmm['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
#print dates_trmm
a = np.argsort(dates_trmm,axis=0)
d_trmm = np.array(dates_trmm)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
legendEntries.append(l)
legendtext.append('TRMM')
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
# Change the legend label colors to almost black
texts = l0.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
        # Colormap index, line width and line style for each model run.
        style_table = {
            'djznw': (1, 0.2, '--'),
            'djzny': (3, 0.5, '--'),
            'djznq': (5, 0.8, '--'),
            'dkjxq': (5, 0.8, ':'),
            'dklzq': (7, 1, '--'),
            'dklwu': (7, 1, '-'),
            'dkmbq': (9, 1.3, '--'),
            'dklyu': (9, 1.3, '-'),
            'djzns': (11, 1.6, '-'),
            'dkbhu': (13, 1.9, '-'),
            'dkhgu': (13, 1.9, ':'),
            'djznu': (15, 2., '-'),
        }
        for c, experiment_id in enumerate(experiment_ids_p):
            expmin1 = experiment_id[:-1]
            print experiment_id
            colour_index, linewidth, linestylez = style_table[experiment_id]
            colour = cmap(1.*colour_index/NUM_COLOURS)
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_bay_of_bengal.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
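                # plotnp presumably holds a per-second rain rate; the
                # factor 3600 converts it to the mm/h shown on the y-axis.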
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
            print experiment_id
            colour_index, linewidth, linestylez = style_table[experiment_id]
            colour = cmap(1.*colour_index/NUM_COLOURS)
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_bay_of_bengal.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8})
plt.gca().add_artist(l1)
plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (UTC)')
plt.ylabel('mm/h')
title="Domain Averaged Rainfall - %s" % ls
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bay_of_bengal_notitle_largeonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bay_of_bengal_largeonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| peterwilletts24/Python-Scripts | plot_scripts/Rain/Diurnal/sea_diurnal_rain_plot_domain_constrain_large_bay_of_bengal.py | Python | mit | 10,247 |
"""
HLS and Color Threshold
-----------------------
You've now seen that various color thresholds can be applied to find the lane lines in images. Here we'll explore
this a bit further and look at a couple examples to see why a color space like HLS can be more robust.
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def run():
"""
    Apply grayscale, RGB and HLS channel thresholds to a test image and plot the results.
"""
image = mpimg.imread('test6.jpg')
# Converting original to gray
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Threshold for original image
thresh = (180, 255)
binary = np.zeros_like(gray)
binary[(gray > thresh[0]) & (gray <= thresh[1])] = 1
red = image[:, :, 0]
green = image[:, :, 1]
blue = image[:, :, 2]
thresh_2 = (200, 255)
binary_2 = np.zeros_like(red)
binary_2[(red > thresh_2[0]) & (red <= thresh_2[1])] = 1
# Converting image to HLS
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
# Splitting HSL
hue = hls[:, :, 0]
lightness = hls[:, :, 1]
saturation = hls[:, :, 2]
# Threshold for saturation
thresh_3 = (90, 255)
binary_3 = np.zeros_like(saturation)
binary_3[(saturation > thresh_3[0]) & (saturation <= thresh_3[1])] = 1
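    # The saturation channel tends to stay stable under changing lighting,
    # which is why its thresholded binary usually isolates lane lines best.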
# Threshold for Hue
thresh_4 = (15, 100)
binary_4 = np.zeros_like(hue)
binary_4[(hue > thresh_4[0]) & (hue <= thresh_4[1])] = 1
# -------------------- Figure -----------------------
f = plt.figure()
size_x, size_y = (4, 4)
f.add_subplot(size_x, size_y, 1)
plt.imshow(image)
plt.title("Original")
f.add_subplot(size_x, size_y, 2)
plt.imshow(gray, cmap='gray')
plt.title("Gray")
f.add_subplot(size_x, size_y, 3)
plt.imshow(binary, cmap='gray')
plt.title("Threshold of ({}, {})".format(thresh[0], thresh[1]))
f.add_subplot(size_x, size_y, 4)
plt.imshow(red, cmap='gray')
plt.title("Red")
f.add_subplot(size_x, size_y, 5)
plt.imshow(green, cmap='gray')
plt.title("Green")
f.add_subplot(size_x, size_y, 6)
plt.imshow(blue, cmap='gray')
plt.title("Blue")
f.add_subplot(size_x, size_y, 7)
plt.imshow(binary_2, cmap='gray')
plt.title("Threshold of Red color")
f.add_subplot(size_x, size_y, 8)
plt.imshow(hue, cmap='gray')
plt.title("Hue")
f.add_subplot(size_x, size_y, 9)
plt.imshow(lightness, cmap='gray')
plt.title("Lightness")
f.add_subplot(size_x, size_y, 10)
plt.imshow(saturation, cmap='gray')
plt.title("Saturation")
f.add_subplot(size_x, size_y, 11)
plt.imshow(binary_3, cmap='gray')
plt.title("Threshold of saturation")
f.add_subplot(size_x, size_y, 12)
plt.imshow(binary_4, cmap='gray')
plt.title("Threshold of hue")
plt.show()
if __name__ == '__main__':
run()
| akshaybabloo/Car-ND | Term_1/advanced_lane_finding_10/color_space_10_8.py | Python | mit | 2,835 |
# Imports for routing
import flask as fl
from app import app
import globes as gb
@app.route('/test/contamination_test')
def contamination_test():
print("hello contamination")
return fl.render_template('html/HomePage.html')
| cliftbar/FlaPyDisaster | FlaPyDisaster/contamination_routes.py | Python | mit | 232 |
from scrapyjs import SplashMiddleware
class SlybotJsMiddleware(SplashMiddleware):
def process_request(self, request, spider):
req = super(SlybotJsMiddleware, self).process_request(request, spider)
splash_auth = getattr(spider, 'splash_auth', None)
if splash_auth and 'Authorization' not in request.headers:
request.headers['Authorization'] = splash_auth
return req
def process_response(self, request, response, spider):
splash_options = request.meta.get("_splash_processed")
response = super(SlybotJsMiddleware, self).process_response(
request, response, spider)
if splash_options:
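            # Splash rewrote request.url to point at the Splash endpoint;
            # restore the originally requested page URL on the response so
            # downstream spider callbacks see the real address.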
url = splash_options['args'].get('url')
response._set_url(url or response.url)
return response
| SouthStar/portia | slybot/slybot/splash.py | Python | bsd-3-clause | 800 |
# you can use print for debugging purposes, e.g.
# print "this is a debug message"
# Strategy: iterate through the string, pushing each character onto a
# stack. Whenever a closing bracket arrives and its matching opening
# bracket (e.g. '{' for '}') is on top of the stack, annihilate the pair
# by popping it. The string is properly nested exactly when the stack is
# empty at the end.
def solution(S):
annihilate = {
')': '(',
']': '[',
'}': '{',
}
stack = []
if not S:
return 1
for char in S:
#print char
if char in annihilate.keys() and len(stack) > 0:
if stack[-1] == annihilate[char]:
stack.pop()
else:
stack.append(char)
else:
stack.append(char)
#print stack
if len(stack) == 0:
return 1
else:
return 0
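# Minimal sanity check (not part of the original Codility submission):
if __name__ == '__main__':
    assert solution("{[()()]}") == 1
    assert solution("([)()]") == 0
    assert solution("") == 1
    print "all bracket checks passed"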
| mickeyshaughnessy/Codility-examples | Brackets.py | Python | mit | 1,226 |
__author__ = 'alexvanboxel'
from datetime import date
from datetime import timedelta
def month(current):
return date(current.year, current.month, 15)
def month_first_day(current):
return date(current.year, current.month, 1)
def month_last_day(current):
d = next_month(current)
return date(d.year, d.month, 1) - timedelta(days=1)
def rec_day_range(collect, current, stop):
value = current
collect.append(value)
if current == stop:
return collect
elif current > stop:
return collect
else:
return rec_day_range(collect, next_day(current), stop)
def rec_month_range(collect, current, stop):
value = month(current)
collect.append(value)
if current == stop:
return collect
elif current > stop:
return collect
else:
return rec_month_range(collect, next_month(current), stop)
def rec_year_range(collect, current, stop):
value = month(current)
collect.append(value)
if current == stop:
return collect
elif current > stop:
return collect
else:
return rec_year_range(collect, next_year(current), stop)
def day_range(range_from, range_till):
part_from = str(range_from).split('-')
part_till = str(range_till).split('-')
start = date(int(part_from[0]), int(part_from[1]), int(part_from[2]))
stop = date(int(part_till[0]), int(part_till[1]), int(part_till[2]))
return rec_day_range([], start, stop)
def month_range(range_from, range_till):
part_from = str(range_from).split('-')
part_till = str(range_till).split('-')
start = date(int(part_from[0]), int(part_from[1]), 15)
stop = date(int(part_till[0]), int(part_till[1]), 15)
return rec_month_range([], start, stop)
def year_range(range_from, range_till):
part_from = str(range_from).split('-')
part_till = str(range_till).split('-')
start = date(int(part_from[0]), 1, 15)
stop = date(int(part_till[0]), 1, 15)
return rec_year_range([], start, stop)
def this_month():
return month(date.today())
def last_month():
return prev_month(this_month())
def next_month(current):
return month(month(current) + timedelta(days=30))
def next_year(current):
return month(month(current) + timedelta(days=365))
def prev_month(current):
return month(month(current) - timedelta(days=30))
def substract_month(current, m):
if m == 0:
return current
else:
return substract_month(prev_month(current), m-1)
def prev_year(current):
return month(month(current) - timedelta(days=365))
def last_year():
return prev_year(this_month())
def year_first_month(current):
m = month(current)
return date(m.year, 1, 15)
def yester_day():
    return prev_day(date.today())
def to_day():
    return date.today()
def prev_day(current):
return current - timedelta(days=1)
def next_day(current):
return current + timedelta(days=1)
def end_of_month(current):
"""Return the current day when it's the last day of the month, otherwise return
a day from previous month. Has only month precision."""
if next_day(current).month != current.month:
return current
else:
return prev_month(current)
def generate_range_from_argv(argv, last):
    # month_range() expects 'YYYY-MM'-style strings
    if argv[0] == 'from':
        return month_range(argv[1], '%d-%d' % (last.year, last.month))
    elif argv[0] == 'range':
        return month_range(argv[1], argv[2])
    elif argv[0] == 'month':
        return month_range(argv[1], argv[1])
    elif argv[0] == 'last':
        month = last_month()
        return month_range('%d-%d' % (month.year, month.month),
                           '%d-%d' % (month.year, month.month))
    elif argv[0] == 'this':
        month = this_month()
        return month_range('%d-%d' % (month.year, month.month),
                           '%d-%d' % (month.year, month.month))
    else:
        print "Known modes are: from, range, month, last, this"
| alexvanboxel/demo-devoxx15-luigi | dateutils.py | Python | apache-2.0 | 4,043 |
# -*- coding: utf-8 -*-
# ------
# License: MIT
# Copyright (c) 2013 Kohei Ozaki (eowenr atmark gmail dot com)
"""
CLI of videolectures-dl
"""
import argparse
import sys
from videolectures.util import VideoDownloader
from videolectures import __version__
def parse_args():
"""
Parse arguments of videolectures-dl
"""
desc = 'A command-line program to download videos from videolectures.net'
p = argparse.ArgumentParser(
description=desc,
prog='videolectures-dl',
conflict_handler='resolve')
p.add_argument(
'-h', '--help', action='help',
help='print this help text and exit')
p.add_argument(
'-v', '--version', action='version',
version=__version__,
help='print program version and exit')
p.add_argument(
'-w', '--overwrite', action='store_true', default=False,
help='overwrite an existent file')
p.add_argument(
'-t', '--title', action='store_true', default=False,
help='use title as a filename')
p.add_argument(
'-o', '--output', type=str, default=None,
help='video filename')
p.add_argument(
'-e', '--resume', action='store_true', default=False,
help='resume an incomplete RTMP download')
p.add_argument('video_url', help='url of video page')
args = p.parse_args()
    if len(args.video_url) <= len("videolectures.net"):
p.print_help()
sys.exit(1)
return args
def main():
"""
Entrypoint
"""
arguments = parse_args()
download = VideoDownloader(arguments)
download.run(arguments.output, arguments.video_url)
sys.exit(0)
if __name__ == '__main__':
main()
| smly/videolectures-dl | videolectures/tool.py | Python | mit | 1,701 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Puzzlebox - Brainstorms - Configuration
#
# Copyright Puzzlebox Productions, LLC (2010-2012)
#
# This code is released under the GNU Public License (GPL) version 2
# For more information please refer to http://www.gnu.org/copyleft/gpl.html
__changelog__ = """\
Last Update: 2012.08.20
"""
import os, sys
#import pygame
#####################################################################
# General configuration
#####################################################################
DEBUG = 1
ENABLE_PYSIDE = True
ENABLE_CONTROL_PANEL = True
# Discrete control drives the robot for a set time period per detection.
# Setting Variable control to "True" will drive the robot in a
# particular direction for as long as the detection occurs
BRAINSTORMS_VARIABLE_CONTROL_DURATION = True
BLINK_DETECTION_ENABLED = True
BLINK_DETECTION_THRESHOLD = 6 # 6 blinks detected within the valid range
BLINK_DETECTION_VALID_RANGE = 2 # 2 seconds
BLINK_DETECTION_INCLUDE_FORWARD = True
BLINK_DETECTION_INCLUDE_LEFT = True
BLINK_DETECTION_INCLUDE_RIGHT = True
BLINK_DETECTION_INCLUDE_REVERSE = True
BRAINSTORMS_CONFIGURATION_FILE_PATH = 'puzzlebox_brainstorms_configuration.ini'
if (sys.platform != 'win32'):
if not os.path.exists(BRAINSTORMS_CONFIGURATION_FILE_PATH):
BRAINSTORMS_CONFIGURATION_FILE_PATH = \
os.path.join('/etc/puzzlebox_synapse', BRAINSTORMS_CONFIGURATION_FILE_PATH)
#####################################################################
# Logging
#####################################################################
LOG_LEVEL_DEBUG = 2
LOG_LEVEL_INFO = 1
LOG_LEVEL_ERROR = 0
LOG_LEVEL_DISABLE = -1
DEFAULT_LOG_LEVEL = LOG_LEVEL_DEBUG
DEFAULT_LOGFILE = 'brainstorms'
LOGFILE_DIR = '/var/log/puzzlebox'
LOGFILE_SUFFIX = '.log'
LOGFILE_SUFFIX_DEBUG = '_debug.log'
LOGFILE_SUFFIX_INFO = '_info.log'
LOGFILE_SUFFIX_ERROR = '_error.log'
SPLIT_LOGFILES = False
#####################################################################
# Network addresses
#####################################################################
BRAINSTORMS_SERVER_INTERFACE = '' # listen on all of server's network interfaces
BRAINSTORMS_SERVER_HOST = '127.0.0.1' # localhost
BRAINSTORMS_SERVER_PORT = 8194
THINKGEAR_SERVER_INTERFACE = '' # listen on all of server's network interfaces
THINKGEAR_SERVER_HOST = '127.0.0.1'
THINKGEAR_SERVER_PORT = 13854
#####################################################################
# NXT Control configuration
#####################################################################
AUTOCONNECT_TO_NXT_DEVICE = False
DEFAULT_NXT_POWER_LEVEL = 80
DEFAULT_NXT_BLUETOOTH_DEVICE_WINDOWS = 'COM1'
DEFAULT_NXT_BLUETOOTH_DEVICE_LINUX = '/dev/rfcomm0'
if (sys.platform == 'win32'):
NXT_BLUETOOTH_DEVICE = DEFAULT_NXT_BLUETOOTH_DEVICE_WINDOWS
else:
NXT_BLUETOOTH_DEVICE = DEFAULT_NXT_BLUETOOTH_DEVICE_LINUX
NXT_MOTORS_MOUNTED_BACKWARDS = False
NXT_MOTOR_PORT_LEFT = 'b'
NXT_MOTOR_PORT_RIGHT = 'a'
NXT_DEFAULT_RC_COMMAND = 'test_drive'
#####################################################################
#iRobot Configuration
#####################################################################
IROBOT_MOVE_DELAY = 1
IROBOT_TURN_DELAY = 0.5
IROBOT_SERIAL_TIMEOUT = 2
DEFAULT_IROBOT_BLUETOOTH_DEVICE_WINDOWS = 'COM40'
DEFAULT_IROBOT_BLUETOOTH_DEVICE_LINUX = '/dev/rfcomm0'
if (sys.platform == 'win32'):
IROBOT_BLUETOOTH_DEVICE = DEFAULT_IROBOT_BLUETOOTH_DEVICE_WINDOWS
else:
IROBOT_BLUETOOTH_DEVICE = DEFAULT_IROBOT_BLUETOOTH_DEVICE_LINUX
IROBOT_DEFAULT_RC_COMMAND = 'test_drive'
IROBOT_VELOCITY_MAX = 500 # mm/s
IROBOT_VELOCITY_SLOW = 15
IROBOT_VELOCITY_FAST = 350
IROBOT_TURN_SPEED = 300
#####################################################################
# RC Car Control configuration
#####################################################################
DEFAULT_RC_CAR_POWER_LEVEL = 80
DEFAULT_RC_CAR_DEVICE_WINDOWS = 'COM1'
DEFAULT_RC_CAR_DEVICE_LINUX = '/dev/rfcomm0'
if (sys.platform == 'win32'):
RC_CAR_DEVICE = DEFAULT_RC_CAR_DEVICE_WINDOWS
else:
RC_CAR_DEVICE = DEFAULT_RC_CAR_DEVICE_LINUX
#####################################################################
# Helicopter configuration
#####################################################################
COMMAND_PACKET = {
'default_neutral': '\x00\x00\x00\xaa\x05\xff\x09\xff\x0d\xff\x13\x54\x14\xaa', # default neutral setting to use for all commands
'default_full_thrust': '\x00\x00\x03\x54\x05\xff\x09\xff\x0d\xff\x13\x54\x14\xaa', # new controll set to highest throttle (no changes to trim)
#'neutral': '\x00\x00\x00\xfa\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # 0.4.5 neutral setting to use for all commands
'neutral': '\x00\x00\x00\xae\x05\xff\x09\xff\x0d\xff\x13\x54\x14\xaa', # default neutral setting to use for all commands
'no_thrust': '\x00\x00\x00\x5a\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # lowest trim setting for throttle
'minimum_thrust': '\x00\x00\x00\xca\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # lowest trim level at which throttle kicks in
'minimum_thrust_minus_one': '\x00\x00\x00\xc6\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # lowest trim level at which throttle kicks in
'maximum_thrust': '\x00\x00\x03\x54\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # maximum possible throttle and trim
'fifty_percent_thrust': '\x00\x00\x01\x7d\x05\xc5\x09\xde\x0e\x0b\x13\x54\x14\xaa', # calculated 50% throttle
'test_packet': '\x00\x00\x03\x54\x06\x15\x09\xca\x0e\x2f\x13\x54\x14\xaa', # test packet from saleae logic screenshot
'maximum_forward': '\x00\x00\x00\x5a\x05\xc5\x0b\x54\x0e\x0b\x13\x54\x14\xaa', # maximum possible elevator and trim
#'fly_forward': '\x00\x00\x01\x7d\x05\xc5\x0a\xde\x0e\x0b\x13\x54\x14\xaa', # 0.4.5 fly_forward settings
'fly_forward': '\x00\x00\x01\x7d\x05\xc5\x0a\xde\x0e\x0b\x13\x54\x14\xaa',
}
COMMAND_ACTIVATE = 'fifty_percent_thrust'
# COMMAND_ACTIVATE = 'maximum_thrust'
# COMMAND_ACTIVATE = 'minimum_thrust'
# COMMAND_ACTIVATE = 'fly_forward'
#####################################################################
# Wheelchair configuration
#####################################################################
WHEELCHAIR_CONTROL_EEG = True
#####################################################################
# Server configuration
#####################################################################
BRAINSTORMS_DELIMITER = '\r'
#TWISTED_SERVER_MAX_COMPONENTS = 16
#####################################################################
# Client configuration
#####################################################################
CLIENT_NO_REPLY_WAIT = 5 # how many seconds before considering a component dead
#TWISTED_CLIENT_MAX_CONNECTION_ATTEMPTS = 5
#####################################################################
# ThinkGear Connect configuration
#####################################################################
THINKGEAR_DELIMITER = '\r'
THINKGEAR_CONFIGURATION_PARAMETERS = {"enableRawOutput": False, "format": "Json"}
THINKGEAR_AUTHORIZATION_ENABLED = False
THINKGEAR_AUTHORIZATION_REQUEST = { \
"appName": "Puzzlebox Brainstorms", \
"appKey": "2e285d7bd5565c0ea73e7e265c73f0691d932408"
}
#####################################################################
# ThinkGear Connect Server Emulator configuration
#####################################################################
THINKGEAR_ENABLE_SIMULATE_HEADSET_DATA = True
THINKGEAR_BLINK_FREQUENCY_TIMER = 6 # blink every 6 seconds
# (6 seconds is listed by Wikipedia
# as being the average number of times
# an adult blinks in a laboratory setting)
THINKGEAR_DEFAULT_SAMPLE_WAVELENGTH = 30 # number of seconds from 0 to max
# and back to 0 for any given
# detection value below
#####################################################################
# Client Interface configuration [Qt]
#####################################################################
THINKGEAR_POWER_THRESHOLDS = { \
'concentration': { \
0: 0, \
10: 0, \
20: 0, \
30: 0, \
40: 60, \
50: 65, \
60: 70, \
70: 75, \
75: 80, \
80: 85, \
90: 90, \
100: 90, \
}, \
'relaxation': { \
0: 0, \
10: 0, \
20: 0, \
30: 0, \
40: 0, \
50: 10, \
60: 10, \
70: 15, \
80: 15, \
90: 20, \
100: 20, \
}, \
} # THINKGEAR_POWER_THRESHOLDS
#####################################################################
# Flash socket policy handling
#####################################################################
FLASH_POLICY_FILE_REQUEST = \
'<policy-file-request/>%c' % 0 # NULL byte termination
FLASH_SOCKET_POLICY_FILE = '''<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<site-control permitted-cross-domain-policies="all" />
<allow-access-from domain="*" to-ports="%i" />
</cross-domain-policy>%c''' % (THINKGEAR_SERVER_PORT, 0)
#####################################################################
# Configuration File Parser
#####################################################################
if os.path.exists(BRAINSTORMS_CONFIGURATION_FILE_PATH):
file = open(BRAINSTORMS_CONFIGURATION_FILE_PATH, 'r')
for line in file.readlines():
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
if line.find('=') == -1:
continue
if (line == "NXT_BLUETOOTH_DEVICE = ''") or \
(line == "IROBOT_BLUETOOTH_DEVICE = ''") or \
(line == "RC_CAR_DEVICE = ''"):
# use operating system default if device not set manually
continue
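        # NOTE: every remaining line is exec'd as Python code below, so the
        # configuration file is treated as fully trusted, executable input.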
try:
exec line
except:
if DEBUG:
print "Error recognizing Puzzlebox Brainstorms configuration option:",
print line
| PuzzleboxIO/brainstorms-python | Puzzlebox/Brainstorms/Configuration.py | Python | agpl-3.0 | 9,943 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('home', '0014_auto_20150916_1847'),
]
operations = [
migrations.AlterField(
model_name='blogpage',
name='date',
field=models.DateField(default=datetime.datetime(2015, 9, 18, 17, 45, 8, 655958, tzinfo=utc), verbose_name='Post Date'),
),
]
| taedori81/gentlecoffee | home/migrations/0015_auto_20150918_1045.py | Python | bsd-3-clause | 535 |
"""
Keys and certificates for tests (KEY1 is a private key of CERT1, etc.)
Generated with::
$ openssl genrsa -des3 -passout pass:test -out key1.key 1024
    $ openssl req -new -key key1.key -out cert1.csr -passin pass:test
$ cp key1.key key1.key.org
$ openssl rsa -in key1.key.org -out key1.key -passin pass:test
$ openssl x509 -req -days 365 -in cert1.csr \
-signkey key1.key -out cert1.crt
$ rm key1.key.org cert1.csr
"""
from __future__ import absolute_import
import __builtin__
from celery import current_app
from celery.exceptions import ImproperlyConfigured
from celery.security import setup_security, disable_untrusted_serializers
from kombu.serialization import registry
from .case import SecurityCase
KEY1 = """-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDCsmLC+eqL4z6bhtv0nzbcnNXuQrZUoh827jGfDI3kxNZ2LbEy
kJOn7GIl2tPpcY2Dm1sOM8G1XLm/8Izprp4ifpF4Gi0mqz0GquY5dcMNASG9zkRO
J1z8dQUyp3PIUHdQdrKbYQVifkA4dh6Kg27k8/IcdY1lHsaIju4bX7MADwIDAQAB
AoGBAKWpCRWjdiluwu+skO0Up6aRIAop42AhzfN8OuZ81SMJRP2rJTHECI8COATD
rDneb63Ce3ibG0BI1Jf3gr624D806xVqK/SVHZNbfWx0daE3Q43DDk1UdhRF5+0X
HPqqU/IdeW1YGyWJi+IhMTXyGqhZ1BTN+4vHL7NlRpDt6JOpAkEA+xvfRO4Ca7Lw
NEgvW7n+/L9b+xygQBtOA5s260pO+8jMrXvOdCjISaKHD8HZGFN9oUmLsDXXBhjh
j0WCMdsHbQJBAMZ9OIw6M/Uxv5ANPCD58p6PZTb0knXVPMYBFQ7Y/h2HZzqbEyiI
DLGZpAa9/IhVkoCULd/TNytz5rl27KEni+sCQArFQEdZmhr6etkTO4zIpoo6vvw/
VxRI14jKEIn5Dvg3vae3RryuvyCBax+e5evoMNxJJkexl354dLxLc/ElfuUCQQCq
U14pBvD7ITuPM6w7aAEIi2iBZhIgR2GlT9xwJ0i4si6lHdms2EJ8TKlyl6mSnEvh
RkavYSJgiU6eLC0WhUcNAkEA7vuNcz/uuckmq870qfSzUQJIYLzwVOadEdEEAVy0
L0usztlKmAH8U/ceQMMJLMI9W4m680JrMf3iS7f+SkgUTA==
-----END RSA PRIVATE KEY-----"""
KEY2 = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C
fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6
rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB
AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U
JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn
6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k
Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26
qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE
AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0
yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh
XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz
i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9
Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V
-----END RSA PRIVATE KEY-----"""
CERT1 = """-----BEGIN CERTIFICATE-----
MIICATCCAWoCCQCR6B3XQcBOvjANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMB4XDTExMDcxOTA5MDgyMloXDTEyMDcxODA5MDgyMlowRTELMAkG
A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwrJi
wvnqi+M+m4bb9J823JzV7kK2VKIfNu4xnwyN5MTWdi2xMpCTp+xiJdrT6XGNg5tb
DjPBtVy5v/CM6a6eIn6ReBotJqs9BqrmOXXDDQEhvc5ETidc/HUFMqdzyFB3UHay
m2EFYn5AOHYeioNu5PPyHHWNZR7GiI7uG1+zAA8CAwEAATANBgkqhkiG9w0BAQUF
AAOBgQA4+OiJ+pyq9lbEMFYC9K2+e77noHJkwUOs4wO6p1R14ZqSmoIszQ7KEBiH
2HHPMUY6kt4GL1aX4Vr1pUlXXdH5WaEk0fvDYZemILDMqIQJ9ettx8KihZjFGC4k
Y4Sy5xmqdE9Kjjd854gTRRnzpMnJp6+74Ki2X8GHxn3YBM+9Ng==
-----END CERTIFICATE-----"""
CERT2 = """-----BEGIN CERTIFICATE-----
MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG
A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti
/G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ
EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm
IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF
AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP
e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS
WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw==
-----END CERTIFICATE-----"""
class TestSecurity(SecurityCase):
def tearDown(self):
registry._disabled_content_types.clear()
def test_disable_untrusted_serializers(self):
disabled = registry._disabled_content_types
self.assertEqual(0, len(disabled))
disable_untrusted_serializers(
['application/json', 'application/x-python-serialize'])
self.assertIn('application/x-yaml', disabled)
self.assertNotIn('application/json', disabled)
self.assertNotIn('application/x-python-serialize', disabled)
disabled.clear()
disable_untrusted_serializers()
self.assertIn('application/x-yaml', disabled)
self.assertIn('application/json', disabled)
self.assertIn('application/x-python-serialize', disabled)
def test_setup_security(self):
disabled = registry._disabled_content_types
self.assertEqual(0, len(disabled))
current_app.conf.CELERY_TASK_SERIALIZER = 'json'
setup_security()
self.assertIn('application/x-python-serialize', disabled)
disabled.clear()
def test_security_conf(self):
current_app.conf.CELERY_TASK_SERIALIZER = 'auth'
self.assertRaises(ImproperlyConfigured, setup_security)
_import = __builtin__.__import__
def import_hook(name, *args, **kwargs):
if name == 'OpenSSL':
raise ImportError
return _import(name, *args, **kwargs)
__builtin__.__import__ = import_hook
self.assertRaises(ImproperlyConfigured, setup_security)
__builtin__.__import__ = _import
| mozilla/make.mozilla.org | vendor-local/lib/python/celery/tests/test_security/__init__.py | Python | bsd-3-clause | 5,707 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Linus Unnebäck <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: make
short_description: Run targets in a Makefile
requirements: [ make ]
version_added: "2.1"
author: Linus Unnebäck (@LinusU) <[email protected]>
description:
- Run targets in a Makefile.
options:
target:
description:
- The target to run
required: false
default: none
params:
description:
- Any extra parameters to pass to make
required: false
default: none
chdir:
description:
- cd into this directory before running make
required: true
file:
description:
- Use file as a Makefile
required: false
default: none
version_added: 2.5
'''
EXAMPLES = '''
# Build the default target
- make:
chdir: /home/ubuntu/cool-project
# Run `install` target as root
- make:
chdir: /home/ubuntu/cool-project
target: install
become: yes
# Pass in extra arguments to build
- make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
# Pass a file as a Makefile
- make:
chdir: /home/ubuntu/cool-project
target: all
file: /some-project/Makefile
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
def run_command(command, module, check_rc=True):
"""
Run a command using the module, return
the result code and std{err,out} content.
:param command: list of command arguments
:param module: Ansible make module instance
:return: return code, stdout content, stderr content
"""
rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
return rc, sanitize_output(out), sanitize_output(err)
def sanitize_output(output):
"""
Sanitize the output string before we
pass it to module.fail_json. Defaults
the string to empty if it is None, else
strips trailing newlines.
:param output: output to sanitize
:return: sanitized output
"""
if output is None:
return ''
else:
return output.rstrip("\r\n")
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
target=dict(required=False, default=None, type='str'),
params=dict(required=False, default=None, type='dict'),
chdir=dict(required=True, default=None, type='path'),
file=dict(required=False, default=None, type='path')
),
)
# Build up the invocation of `make` we are going to use
make_path = module.get_bin_path('make', True)
make_target = module.params['target']
if module.params['params'] is not None:
make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
else:
make_parameters = []
if module.params['file'] is not None:
base_command = [make_path, "--file", module.params['file'], make_target]
else:
base_command = [make_path, make_target]
base_command.extend(make_parameters)
    # Check if the target is already up to date: `make --question` exits
    # with 0 when nothing needs to be done, and non-zero otherwise
rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
if module.check_mode:
# If we've been asked to do a dry run, we only need
# to report whether or not the target is up to date
changed = (rc != 0)
else:
if rc == 0:
# The target is up to date, so we don't have to
# do anything
changed = False
else:
            # The target isn't up to date, so we need to run it
rc, out, err = run_command(base_command, module)
changed = True
# We don't report the return code, as if this module failed
# we would be calling fail_json from run_command, so even if
# we had a non-zero return code, we did not fail. However, if
# we report a non-zero return code here, we will be marked as
# failed regardless of what we signal using the failed= kwarg.
module.exit_json(
changed=changed,
failed=False,
stdout=out,
stderr=err,
target=module.params['target'],
params=module.params['params'],
chdir=module.params['chdir'],
file=module.params['file']
)
if __name__ == '__main__':
main()
| tsdmgz/ansible | lib/ansible/modules/system/make.py | Python | gpl-3.0 | 4,773 |
#!/usr/bin/env python
#
# Copyright 2012 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg <[email protected]>
test_minimalmodbus: Unittests for the :mod:`minimalmodbus` module.
For each function are these tests performed:
* Known results
* Invalid input value
* Invalid input type
This unittest suite uses a mock/dummy serial port from the module :mod:`dummy_serial`,
so it is possible to test the functionality using previously recorded communication data.
With dummy responses, it is also possible to simulate errors in the communication
from the slave. A few different types of communication errors are tested, as seen in this table.
===================================== ===================== =================================
Simulated response error Tested using function Tested using Modbus function code
===================================== ===================== =================================
No response read_bit 2
Wrong CRC in response write_register 16
Wrong slave address in response write_register 16
Wrong function code in response write_register 16
Slave indicates an error write_register 16
Wrong byte count in response read_bit 2
Wrong register address in response write_register 16
Wrong number of registers in response write_bit 15
Wrong number of registers in response write_register 16
Wrong write data in response write_bit 5
Wrong write data in response write_register 6
===================================== ===================== =================================
"""
__author__ = "Jonas Berg"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
import sys
import time
import unittest
import minimalmodbus
import dummy_serial
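# Outline of how the dummy port is wired in (a sketch: it assumes that
# dummy_serial exposes a Serial class plus a RESPONSES dict mapping
# recorded request strings to canned response strings):
#
# dummy_serial.RESPONSES['<recorded request>'] = '<recorded response>'
# minimalmodbus.serial.Serial = dummy_serial.Serial  # monkey-patch the port
# instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1)  # name is arbitrary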
ALSO_TIME_CONSUMING_TESTS = True
"""Set this to :const:`False` to skip the most time consuming tests"""
VERBOSITY = 0
"""Verbosity level for the unit testing. Use value 0 or 2. Note that it only has an effect for Python 2.7 and above."""
SHOW_ERROR_MESSAGES_FOR_ASSERTRAISES = False
"""Set this to :const:`True` for printing the error messages caught by assertRaises().
If set to :const:`True`, any unintentional error messages raised during the processing of the command in :meth:`.assertRaises` are also caught (not counted). It will be printed in the short form, and will show no traceback. It can also be useful to set :data:`VERBOSITY` = 2.
"""
_LARGE_NUMBER_OF_BYTES = 1000
# For compatibility with Python2.6
_VERSION_LIMIT = 0x02070000
_runTestsForNewVersion = sys.hexversion >= _VERSION_LIMIT
###########################################################
# For showing the error messages caught by assertRaises() #
# and to implement a better assertAlmostEqual() #
###########################################################
class _NonexistantError(Exception):
pass
class ExtendedTestCase(unittest.TestCase):
"""Overriding the assertRaises() method to be able to print the error message.
Use :data:`test_minimalmodbus.SHOW_ERROR_MESSAGES_FOR_ASSERTRAISES` = :const:`True`
in order to use this option. It can also be useful to set :data:`test_minimalmodbus.VERBOSITY` = 2.
Based on http://stackoverflow.com/questions/8672754/how-to-show-the-error-messages-caught-by-assertraises-in-unittest-in-python2-7
"""
def assertRaises(self, excClass, callableObj, *args, **kwargs):
"""Prints the caught error message (if :data:`SHOW_ERROR_MESSAGES_FOR_ASSERTRAISES` is :const:`True`)."""
if SHOW_ERROR_MESSAGES_FOR_ASSERTRAISES:
try:
unittest.TestCase.assertRaises(self, _NonexistantError, callableObj, *args, **kwargs)
except:
minimalmodbus._print_out( '\n ' + repr(sys.exc_info()[1]) )
else:
unittest.TestCase.assertRaises(self, excClass, callableObj, *args, **kwargs)
def assertAlmostEqualRatio(self, first, second, epsilon = 1.000001):
"""A function to compare floats, with ratio instead of difference.
Args:
* first (float): Input argument for comparison
* second (float): Input argument for comparison
* epsilon (float): Largest allowed ratio of largest to smallest of the two input arguments
"""
if first == second:
return
if (first < 0 and second >= 0) or (first >= 0 and second < 0):
raise AssertionError('The arguments have different signs: {0!r} and {1!r}'.format(first, second))
ratio = max(first, second)/float(min(first, second))
if ratio > epsilon:
raise AssertionError('The arguments are not equal: {0!r} and {1!r}. Epsilon is {2!r}.'.\
format(first, second, epsilon))
##############################
# Constants for type testing #
##############################
_NOT_INTERGERS_OR_NONE = [0.0, 1.0, '1', ['1'], [1], ['\x00\x2d\x00\x58'], ['A', 'B', 'C']]
_NOT_INTERGERS = _NOT_INTERGERS_OR_NONE + [None]
_NOT_NUMERICALS_OR_NONE = ['1', ['1'], [1], ['\x00\x2d\x00\x58'], ['A', 'B', 'C']]
_NOT_NUMERICALS = _NOT_NUMERICALS_OR_NONE + [None]
_NOT_STRINGS_OR_NONE = [1, 0.0, 1.0, ['1'], [1], ['\x00\x2d\x00\x58'], ['A', 'B', 'C'], True, False]
_NOT_STRINGS = _NOT_STRINGS_OR_NONE + [None]
_NOT_BOOLEANS = ['True', 'False', -1, 1, 2, 0, 8, 9999999, -1.0, 1.0, 0.0, [True], [False], [1], [1.0] ]
_NOT_INTLISTS = [0, 1, 2, -1, True, False, 0.0, 1.0, '1', ['1'], None, ['\x00\x2d\x00\x58'], ['A', 'B', 'C'], [1.0], [1.0, 2.0] ]
####################
# Payload handling #
####################
class TestEmbedPayload(ExtendedTestCase):
knownValues=[
(2, 2, 'rtu', '123', '\x02\x02123X\xc2'),
(1, 16, 'rtu', 'ABC', '\x01\x10ABC<E'),
(0, 5, 'rtu', 'hjl', '\x00\x05hjl\x8b\x9d'),
(1, 3, 'rtu', '\x01\x02\x03', '\x01\x03\x01\x02\x03\t%'),
(1, 3, 'ascii', '123', ':010331323366\r\n'),
(4, 5, 'ascii', '\x01\x02\x03', ':0405010203F1\r\n'),
(2, 2, 'ascii', '123', ':020231323366\r\n'),
]
def testKnownValues(self):
for slaveaddress, functioncode, mode, inputstring, knownresult in self.knownValues:
result = minimalmodbus._embedPayload(slaveaddress, mode, functioncode, inputstring)
self.assertEqual(result, knownresult)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._embedPayload, 248, 'rtu', 16, 'ABC') # Wrong slave address
self.assertRaises(ValueError, minimalmodbus._embedPayload, -1, 'rtu', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 248, 'ascii', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, -1, 'ascii', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'rtuu', 16, 'ABC') # Wrong Modbus mode
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'RTU', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'ASCII', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'asci', 16, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'rtu', 222, 'ABC') # Wrong function code
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'rtu', -1, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'ascii', 222, 'ABC')
self.assertRaises(ValueError, minimalmodbus._embedPayload, 1, 'ascii', -1, 'ABC')
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._embedPayload, value, 'rtu', 16, 'ABC')
self.assertRaises(TypeError, minimalmodbus._embedPayload, value, 'ascii', 16, 'ABC')
self.assertRaises(TypeError, minimalmodbus._embedPayload, 1, 'rtu', value, 'ABC')
self.assertRaises(TypeError, minimalmodbus._embedPayload, 1, 'ascii', value, 'ABC')
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._embedPayload, 1, value, 16, 'ABC')
self.assertRaises(TypeError, minimalmodbus._embedPayload, 1, 'rtu', 16, value)
self.assertRaises(TypeError, minimalmodbus._embedPayload, 1, 'ascii', 16, value)
class TestExtractPayload(ExtendedTestCase):
knownValues = TestEmbedPayload.knownValues
def testKnownValues(self):
for slaveaddress, functioncode, mode, knownresult, inputstring in self.knownValues:
result = minimalmodbus._extractPayload(inputstring, slaveaddress, mode, functioncode)
self.assertEqual(result, knownresult)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x02123X\xc3', 2, 'rtu', 2) # Wrong CRC from slave
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':0202313233F1\r\n', 2, 'ascii', 2) # Wrong LRC from slave
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x82123q\x02', 2, 'rtu', 2) # Error indication from slave
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':0282313233E6\r\n', 2, 'ascii', 2)
self.assertRaises(ValueError, minimalmodbus._extractPayload, 'ABC', 2, 'rtu', 2) # Too short message from slave
self.assertRaises(ValueError, minimalmodbus._extractPayload, 'ABCDEFGH', 2, 'ascii', 2)
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x72123B\x02', 2, 'rtu', 2) # Wrong functioncode from slave
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':020431323364\r\n', 2, 'ascii', 2)
self.assertRaises(ValueError, minimalmodbus._extractPayload, '020231323366\r\n', 2, 'ascii', 2) # Missing ASCII header
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':020231323366', 2, 'ascii', 2) # Wrong ASCII footer
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':020231323366\r', 2, 'ascii', 2)
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':020231323366\n', 2, 'ascii', 2)
self.assertRaises(ValueError, minimalmodbus._extractPayload, ':02023132366\r\n', 2, 'ascii', 2) # Odd number of ASCII payload characters
for value in [3, 95, 128, 248, -1]:
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', value, 'rtu', 2) # Wrong slave address
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', 2, 'rtu', value) # Wrong functioncode
for value in ['RTU', 'ASCII', 'asc', '', ' ']:
self.assertRaises(ValueError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', 2, value, 2) # Wrong mode
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', value, 'rtu', 2) # Wrong slaveaddress type
self.assertRaises(TypeError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', value, 'ascii', 2)
self.assertRaises(TypeError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', 2, 'rtu', value) # Wrong functioncode type
self.assertRaises(TypeError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', 2, 'ascii', value)
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._extractPayload, value, 2, 'rtu', 2) # Wrong message
self.assertRaises(TypeError, minimalmodbus._extractPayload, value, 2, 'ascii', 2)
self.assertRaises(TypeError, minimalmodbus._extractPayload, '\x02\x02123X\xc2', 2, value, 2) # Wrong mode
class TestSanityEmbedExtractPayload(ExtendedTestCase):
knownValues = TestEmbedPayload.knownValues
def testKnownValues(self):
for slaveaddress, functioncode, mode, payload, message in self.knownValues:
embeddedResult = minimalmodbus._embedPayload(slaveaddress, mode, functioncode, payload)
extractedResult = minimalmodbus._extractPayload(embeddedResult, slaveaddress, mode, functioncode)
self.assertEqual(extractedResult, payload)
def testRange(self):
for i in range(110):
payload = str(i)
embeddedResultRtu = minimalmodbus._embedPayload(2, 'rtu', 6, payload)
extractedResultRtu = minimalmodbus._extractPayload(embeddedResultRtu, 2, 'rtu', 6)
self.assertEqual(extractedResultRtu, payload)
embeddedResultAscii = minimalmodbus._embedPayload(2, 'ascii', 6, payload)
extractedResultAscii = minimalmodbus._extractPayload(embeddedResultAscii, 2, 'ascii', 6)
self.assertEqual(extractedResultAscii, payload)
############################################
## Serial communication utility functions ##
############################################
class TestPredictResponseSize(ExtendedTestCase):
knownValues = [
('rtu', 1, '\x00\x3e\x00\x01', 6),
('rtu', 1, '\x00\x3e\x00\x07', 6),
('rtu', 1, '\x00\x3e\x00\x08', 6),
('rtu', 1, '\x00\x3e\x00\x09', 7),
('rtu', 3, 'AB\x00\x07', 19),
('rtu', 4, 'AB\x00\x07', 19),
('rtu', 4, 'AB\x01\x07', 531),
('rtu', 5, '\x00\x47\xff\x00', 8),
('rtu', 16, '\x00\x48\x00\x01\x01\x01', 8),
('ascii', 1, '\x00\x3e\x00\x01', 13),
('ascii', 1, '\x00\x3e\x00\x07', 13),
('ascii', 1, '\x00\x3e\x00\x08', 13),
('ascii', 1, '\x00\x3e\x00\x09', 15),
('ascii', 3, 'AB\x00\x07', 39),
('ascii', 4, 'AB\x00\x07', 39),
('ascii', 4, 'AB\x01\x07', 1063),
('ascii', 5, '\x00\x47\xff\x00', 17),
('ascii', 16, '\x00\x48\x00\x01\x01\x01', 17),
]
def testKnownValues(self):
for mode, functioncode, payloadToSlave, knownvalue in self.knownValues:
resultvalue = minimalmodbus._predictResponseSize(mode, functioncode, payloadToSlave)
self.assertEqual(resultvalue, knownvalue)
def testRecordedRtuMessages(self):
## Use the dictionary where the key is the 'message', and the item is the 'response'
for message in GOOD_RTU_RESPONSES:
slaveaddress = ord(message[0])
functioncode = ord(message[1])
payloadToSlave = minimalmodbus._extractPayload(message, slaveaddress, 'rtu', functioncode)
result = minimalmodbus._predictResponseSize('rtu', functioncode, payloadToSlave)
responseFromSlave = GOOD_RTU_RESPONSES[message]
self.assertEqual(result, len(responseFromSlave))
def testRecordedAsciiMessages(self):
## Use the dictionary where the key is the 'message', and the item is the 'response'
for message in GOOD_ASCII_RESPONSES:
slaveaddress = int(message[1:3])
functioncode = int(message[3:5])
payloadToSlave = minimalmodbus._extractPayload(message, slaveaddress, 'ascii', functioncode)
result = minimalmodbus._predictResponseSize('ascii', functioncode, payloadToSlave)
responseFromSlave = GOOD_ASCII_RESPONSES[message]
self.assertEqual(result, len(responseFromSlave))
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'asciiii', 6, 'ABCD') # Wrong mode
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'ascii', 999, 'ABCD') # Wrong function code
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'rtu', 999, 'ABCD') # Wrong function code
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'ascii', 1, 'ABC') # Too short message
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'rtu', 1, 'ABC') # Too short message
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'ascii', 1, 'AB') # Too short message
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'ascii', 1, 'A') # Too short message
self.assertRaises(ValueError, minimalmodbus._predictResponseSize, 'ascii', 1, '') # Too short message
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._predictResponseSize, value, 1, 'ABCD')
self.assertRaises(TypeError, minimalmodbus._predictResponseSize, 'rtu', 1, value)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._predictResponseSize, 'rtu', value, 'ABCD')
class TestCalculateMinimumSilentPeriod(ExtendedTestCase):
knownValues=[
(2400, 0.016),
(2400.0, 0.016),
(4800, 0.008),
(9600, 0.004),
(19200, 0.002),
(38400, 0.001),
(115200, 0.00033)
]
def testKnownValues(self):
for baudrate, knownresult in self.knownValues:
result = minimalmodbus._calculate_minimum_silent_period(baudrate)
self.assertAlmostEqualRatio(result, knownresult, 1.02) # Allow 2% deviation from listed known values
def testWrongInputValue(self):
for value in [-2400, -2400.0, -1, -0.5 , 0, 0.5, 0.9]:
self.assertRaises(ValueError, minimalmodbus._calculate_minimum_silent_period, value)
def testWrongInputType(self):
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, minimalmodbus._calculate_minimum_silent_period, value)
##############################
# String and num conversions #
##############################
class TestNumToOneByteString(ExtendedTestCase):
knownValues=[
(0, '\x00' ),
(7, '\x07' ),
(255, '\xff' ),
]
def testKnownValues(self):
for inputvalue, knownstring in self.knownValues:
resultstring = minimalmodbus._numToOneByteString( inputvalue )
self.assertEqual(resultstring, knownstring)
def testKnownLoop(self):
for value in range(256):
knownstring = chr(value)
resultstring = minimalmodbus._numToOneByteString(value)
self.assertEqual(resultstring, knownstring)
def testWrongInput(self):
self.assertRaises(ValueError, minimalmodbus._numToOneByteString, -1)
self.assertRaises(ValueError, minimalmodbus._numToOneByteString, 256)
def testWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._numToOneByteString, value)
class TestNumToTwoByteString(ExtendedTestCase):
knownValues=[
(0.0, 0, False, False, '\x00\x00'), # Range 0-65535
(0, 0, False, False, '\x00\x00'),
(0, 0, True, False, '\x00\x00'),
(77.0, 1, False, False, '\x03\x02'),
(77.0, 1, True, False, '\x02\x03'),
(770, 0, False, False, '\x03\x02'),
(770, 0, True, False, '\x02\x03'),
(65535, 0, False, False, '\xff\xff'),
(65535, 0, True, False, '\xff\xff'),
(770, 0, False, True, '\x03\x02'), # Range -32768 to 32767
(77.0, 1, False, True, '\x03\x02'),
(0.0, 0, False, True, '\x00\x00'),
(0.0, 3, False, True, '\x00\x00'),
(-1, 0, False, True, '\xff\xff'),
(-1, 1, False, True, '\xff\xf6'),
(-77, 0, False, True, '\xff\xb3'),
(-770, 0, False, True, '\xfc\xfe'),
(-77, 1, False, True, '\xfc\xfe'),
(-32768, 0, False, True, '\x80\x00'),
(32767, 0, False, True, '\x7f\xff'),
]
def testKnownValues(self):
for inputvalue, numberOfDecimals, LsbFirst, signed, knownstring in self.knownValues:
resultstring = minimalmodbus._numToTwoByteString(inputvalue, numberOfDecimals, LsbFirst, signed)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
for LsbFirst in [False, True]:
# Range 0-65535
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77, -1, LsbFirst)
if _runTestsForNewVersion: # For compatibility with Python2.6
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77000, 0, LsbFirst) # Gives DeprecationWarning instead of ValueError for Python 2.6
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 65536, 0, LsbFirst)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77, 4, LsbFirst)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, -1, 0, LsbFirst)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, -77, 1, LsbFirst)
# Range -32768 to 32767
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77, -1, LsbFirst, True)
if _runTestsForNewVersion: # For compatibility with Python2.6
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, -77000, 0, LsbFirst, True) # Gives DeprecationWarning instead of ValueError for Python 2.6
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, -32769, 0, LsbFirst, True)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 32768, 0, LsbFirst, True)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77000, 0, LsbFirst, True)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, 77, 4, LsbFirst, True)
self.assertRaises(ValueError, minimalmodbus._numToTwoByteString, -77, 4, LsbFirst, True)
def testWrongInputType(self):
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, minimalmodbus._numToTwoByteString, value, 1, False, False)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._numToTwoByteString, 77, value, False, False)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, minimalmodbus._numToTwoByteString, 77, 1, value, False)
self.assertRaises(TypeError, minimalmodbus._numToTwoByteString, 77, 1, False, value)
class TestTwoByteStringToNum(ExtendedTestCase):
knownValues=TestNumToTwoByteString.knownValues
def testKnownValues(self):
for knownvalue, numberOfDecimals, LsbFirst, signed, bytestring in self.knownValues:
if not LsbFirst:
resultvalue = minimalmodbus._twoByteStringToNum(bytestring, numberOfDecimals, signed)
self.assertEqual(resultvalue, knownvalue)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._twoByteStringToNum, 'ABC', 1)
self.assertRaises(ValueError, minimalmodbus._twoByteStringToNum, 'A', 1)
self.assertRaises(ValueError, minimalmodbus._twoByteStringToNum, 'AB', -1)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._twoByteStringToNum, value, 1)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._twoByteStringToNum, 'AB', value)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, minimalmodbus._twoByteStringToNum, '\x03\x02', 1, value)
class TestSanityTwoByteString(ExtendedTestCase):
knownValues=TestNumToTwoByteString.knownValues
def testSanity(self):
for value, numberOfDecimals, LsbFirst, signed, bytestring in self.knownValues:
if not LsbFirst:
resultvalue = minimalmodbus._twoByteStringToNum( \
minimalmodbus._numToTwoByteString(value, numberOfDecimals, LsbFirst, signed), \
numberOfDecimals, signed )
self.assertEqual(resultvalue, value)
if ALSO_TIME_CONSUMING_TESTS:
for value in range(0x10000):
resultvalue = minimalmodbus._twoByteStringToNum( minimalmodbus._numToTwoByteString(value) )
self.assertEqual(resultvalue, value)
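# Added illustration: the two-byte conversions exercised above are equivalent
# to scaling by 10**numberOfDecimals and packing as a big- or little-endian
# 16-bit integer. A minimal sketch, assuming struct-based packing and the
# py2-style str bytestrings used throughout this file (not necessarily how
# minimalmodbus implements it):
def _example_num_to_two_byte_string(value, numberOfDecimals=0, LsbFirst=False, signed=False):
    import struct
    integer = int(round(value * 10 ** numberOfDecimals))
    formatcode = ('<' if LsbFirst else '>') + ('h' if signed else 'H')
    return struct.pack(formatcode, integer)
# For example _example_num_to_two_byte_string(77.0, 1) gives '\x03\x02', and
# LsbFirst=True swaps the bytes to '\x02\x03', matching the knownValues above.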
class TestLongToBytestring(ExtendedTestCase):
knownValues=[
(0, False, 2, '\x00\x00\x00\x00'),
(0, True, 2, '\x00\x00\x00\x00'),
(1, False, 2, '\x00\x00\x00\x01'),
(1, True, 2, '\x00\x00\x00\x01'),
(2, False, 2, '\x00\x00\x00\x02'),
(2, True, 2, '\x00\x00\x00\x02'),
(75000, False, 2, '\x00\x01\x24\xf8'),
(75000, True, 2, '\x00\x01\x24\xf8'),
(1000000, False, 2, '\x00\x0f\x42\x40'),
(1000000, True, 2, '\x00\x0f\x42\x40'),
(2147483647, False, 2, '\x7f\xff\xff\xff'),
(2147483647, True, 2, '\x7f\xff\xff\xff'),
(2147483648, False, 2, '\x80\x00\x00\x00'),
(4294967295, False, 2, '\xff\xff\xff\xff'),
(-1, True, 2, '\xff\xff\xff\xff'),
(-2147483648, True, 2, '\x80\x00\x00\x00'),
(-200000000, True, 2, '\xf4\x14\x3e\x00'),
]
def testKnownValues(self):
for value, signed, numberOfRegisters, knownstring in self.knownValues:
resultstring = minimalmodbus._longToBytestring(value, signed, numberOfRegisters)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
if _runTestsForNewVersion: # For compatibility with Python2.6
self.assertRaises(ValueError, minimalmodbus._longToBytestring, -1, False, 2) # Range 0 to 4294967295
self.assertRaises(ValueError, minimalmodbus._longToBytestring, 4294967296, False, 2)
self.assertRaises(ValueError, minimalmodbus._longToBytestring, -2147483649, True, 2) # Range -2147483648 to 2147483647
self.assertRaises(ValueError, minimalmodbus._longToBytestring, 2147483648, True, 2)
self.assertRaises(ValueError, minimalmodbus._longToBytestring, 222222222222222, True, 2)
for numberOfRegisters in [0, 1, 3, 4, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, minimalmodbus._longToBytestring, 1, True, numberOfRegisters)
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._longToBytestring, value, True, 2)
self.assertRaises(TypeError, minimalmodbus._longToBytestring, 1, True, value)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, minimalmodbus._longToBytestring, 1, value, 2)
class TestBytestringToLong(ExtendedTestCase):
knownValues=TestLongToBytestring.knownValues
def testKnownValues(self):
for knownvalue, signed, numberOfRegisters, bytestring in self.knownValues:
resultvalue = minimalmodbus._bytestringToLong(bytestring, signed, numberOfRegisters)
self.assertEqual(resultvalue, knownvalue)
def testWrongInputValue(self):
for inputstring in ['', 'A', 'AA', 'AAA', 'AAAAA']:
self.assertRaises(ValueError, minimalmodbus._bytestringToLong, inputstring, True, 2)
for numberOfRegisters in [0, 1, 3, 4, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, minimalmodbus._bytestringToLong, 'AAAA', True, numberOfRegisters)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._bytestringToLong, value, True, 2)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, minimalmodbus._bytestringToLong, 'AAAA', value, 2)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._bytestringToLong, 'AAAA', True, value)
class TestSanityLong(ExtendedTestCase):
knownValues=TestLongToBytestring.knownValues
def testSanity(self):
for value, signed, numberOfRegisters, bytestring in self.knownValues:
resultvalue = minimalmodbus._bytestringToLong( \
minimalmodbus._longToBytestring(value, signed, numberOfRegisters), signed, numberOfRegisters)
self.assertEqual(resultvalue, value)
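# Added illustration: a 32-bit (long) value spans two 16-bit registers and is
# sent big-endian. A minimal sketch, assuming struct-based packing (cf. the
# '>l' and '>L' format codes in TestPack further below):
def _example_long_to_bytestring(value, signed=False):
    import struct
    return struct.pack('>l' if signed else '>L', value)
# For example _example_long_to_bytestring(75000) gives '\x00\x01\x24\xf8'.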
class TestFloatToBytestring(ExtendedTestCase):
# Use this online calculator:
# http://babbage.cs.qc.cuny.edu/IEEE-754/index.xhtml
# See also examples in
# http://en.wikipedia.org/wiki/Single-precision_floating-point_format
# http://en.wikipedia.org/wiki/Double-precision_floating-point_format
knownValues=[
(1, 2, '\x3f\x80\x00\x00'),
(1.0, 2, '\x3f\x80\x00\x00'), # wikipedia
        (1.0, 2, '?\x80\x00\x00'),  # Same bytes as the entry above ('?' is '\x3f')
(1.1, 2, '\x3f\x8c\xcc\xcd'),
(100, 2, '\x42\xc8\x00\x00'),
(100.0, 2, '\x42\xc8\x00\x00'),
(1.0e5, 2, '\x47\xc3\x50\x00'),
(1.1e9, 2, '\x4e\x83\x21\x56'),
(1.0e16, 2, '\x5a\x0e\x1b\xca'),
(1.5e16, 2, '\x5a\x55\x29\xaf'),
(3.65e30, 2, '\x72\x38\x47\x25'),
(-1.1, 2, '\xbf\x8c\xcc\xcd'),
(-2, 2, '\xc0\x00\x00\x00'),
(-3.6e30, 2, '\xf2\x35\xc0\xe9'),
(1.0, 4, '\x3f\xf0\x00\x00\x00\x00\x00\x00'),
(2, 4, '\x40\x00\x00\x00\x00\x00\x00\x00'),
(1.1e9, 4, '\x41\xd0\x64\x2a\xc0\x00\x00\x00'),
(3.65e30, 4, '\x46\x47\x08\xe4\x9e\x2f\x4d\x62'),
        (2.42e300, 4, '\x7e\x4c\xe8\xa5\x67\x1f\x46\xa0'),
(-1.1, 4, '\xbf\xf1\x99\x99\x99\x99\x99\x9a'),
(-2, 4, '\xc0\x00\x00\x00\x00\x00\x00\x00'),
(-3.6e30, 4, '\xc6\x46\xb8\x1d\x1a\x43\xb2\x06'),
]
def testKnownValues(self):
for value, numberOfRegisters, knownstring in self.knownValues:
resultstring = minimalmodbus._floatToBytestring(value, numberOfRegisters)
self.assertEqual(resultstring, knownstring)
self.assertEqual(minimalmodbus._floatToBytestring(1.5e999, 2), '\x7f\x80\x00\x00') # +inf
def testWrongInputValue(self):
        # Note: An out-of-range input value does not necessarily raise an error; it may instead be encoded as +inf etc.
for numberOfRegisters in [0, 1, 3, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, minimalmodbus._floatToBytestring, 1.1, numberOfRegisters)
def testWrongInputType(self):
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, minimalmodbus._floatToBytestring, value, 2)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._floatToBytestring, 1.1, value)
class TestBytestringToFloat(ExtendedTestCase):
knownValues=TestFloatToBytestring.knownValues
def testKnownValues(self):
for knownvalue, numberOfRegisters, bytestring in self.knownValues:
resultvalue = minimalmodbus._bytestringToFloat(bytestring, numberOfRegisters)
self.assertAlmostEqualRatio(resultvalue, knownvalue)
def testWrongInputValue(self):
for bytestring in ['', 'A', 'AB', 'ABC', 'ABCDE', 'ABCDEF', 'ABCDEFG']:
self.assertRaises(ValueError, minimalmodbus._bytestringToFloat, bytestring, 2)
self.assertRaises(ValueError, minimalmodbus._bytestringToFloat, bytestring, 4)
for numberOfRegisters in [0, 1, 3, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, minimalmodbus._bytestringToFloat, 'ABCD', numberOfRegisters)
self.assertRaises(ValueError, minimalmodbus._bytestringToFloat, 'ABCDEFGH', numberOfRegisters)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._bytestringToFloat, value, 2)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._bytestringToFloat, 1.1, value)
class TestSanityFloat(ExtendedTestCase):
knownValues=TestFloatToBytestring.knownValues
def testSanity(self):
for value, numberOfRegisters, knownstring in self.knownValues:
resultvalue = minimalmodbus._bytestringToFloat( \
minimalmodbus._floatToBytestring(value, numberOfRegisters), numberOfRegisters)
self.assertAlmostEqualRatio(resultvalue, value)
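# Added illustration: the float values above are plain IEEE-754 encodings,
# big-endian; two registers hold a single-precision float and four registers a
# double. A minimal sketch, assuming struct-based packing:
def _example_float_to_bytestring(value, numberOfRegisters=2):
    import struct
    return struct.pack('>f' if numberOfRegisters == 2 else '>d', value)
# For example _example_float_to_bytestring(1.0) gives '\x3f\x80\x00\x00',
# which can be verified with the IEEE-754 calculator linked above.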
class TestValuelistToBytestring(ExtendedTestCase):
knownValues=[
([1], 1, '\x00\x01'),
([0, 0], 2, '\x00\x00\x00\x00'),
([1, 2], 2, '\x00\x01\x00\x02'),
([1, 256], 2, '\x00\x01\x01\x00'),
([1, 2, 3, 4], 4, '\x00\x01\x00\x02\x00\x03\x00\x04'),
([1, 2, 3, 4, 5], 5, '\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05'),
]
def testKnownValues(self):
for value, numberOfRegisters, knownstring in self.knownValues:
resultstring = minimalmodbus._valuelistToBytestring(value, numberOfRegisters)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._valuelistToBytestring, [1, 2, 3, 4], 1)
self.assertRaises(ValueError, minimalmodbus._valuelistToBytestring, [1, 2, 3, 4], -4)
def testWrongInputType(self):
for value in _NOT_INTLISTS:
self.assertRaises(TypeError, minimalmodbus._valuelistToBytestring, value, 4)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._valuelistToBytestring, [1, 2, 3, 4], value)
class TestBytestringToValuelist(ExtendedTestCase):
knownValues=TestValuelistToBytestring.knownValues
def testKnownValues(self):
for knownlist, numberOfRegisters, bytestring in self.knownValues:
resultlist = minimalmodbus._bytestringToValuelist(bytestring, numberOfRegisters)
self.assertEqual(resultlist, knownlist)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._bytestringToValuelist, '\x00\x01\x00\x02', 1)
self.assertRaises(ValueError, minimalmodbus._bytestringToValuelist, '', 1)
self.assertRaises(ValueError, minimalmodbus._bytestringToValuelist, '\x00\x01', 0)
self.assertRaises(ValueError, minimalmodbus._bytestringToValuelist, '\x00\x01', -1)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._bytestringToValuelist, value, 1)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._bytestringToValuelist, 'A', value)
class TestSanityValuelist(ExtendedTestCase):
knownValues=TestValuelistToBytestring.knownValues
def testSanity(self):
for valuelist, numberOfRegisters, bytestring in self.knownValues:
resultlist = minimalmodbus._bytestringToValuelist( \
minimalmodbus._valuelistToBytestring(valuelist, numberOfRegisters), numberOfRegisters)
self.assertEqual(resultlist, valuelist)
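# Added illustration: each register value in the list becomes one big-endian
# 16-bit field. A minimal sketch, assuming py2-style str bytestrings:
def _example_valuelist_to_bytestring(valuelist):
    import struct
    return ''.join(struct.pack('>H', value) for value in valuelist)
# For example _example_valuelist_to_bytestring([1, 256]) gives '\x00\x01\x01\x00'.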
class TestTextstringToBytestring(ExtendedTestCase):
knownValues = [
('A', 1, 'A '),
('AB', 1, 'AB'),
('ABC', 2, 'ABC '),
('ABCD', 2, 'ABCD'),
('A', 16, 'A'+' '*31),
('A', 32, 'A'+' '*63),
]
def testKnownValues(self):
for textstring, numberOfRegisters, knownstring in self.knownValues:
resultstring = minimalmodbus._textstringToBytestring(textstring, numberOfRegisters)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._textstringToBytestring, 'ABC', 1)
self.assertRaises(ValueError, minimalmodbus._textstringToBytestring, '', 1)
self.assertRaises(ValueError, minimalmodbus._textstringToBytestring, 'A', -1)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._textstringToBytestring, value, 1)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._textstringToBytestring, 'AB', value)
class TestBytestringToTextstring(ExtendedTestCase):
knownValues=TestTextstringToBytestring.knownValues
def testKnownValues(self):
for knownstring, numberOfRegisters, bytestring in self.knownValues:
resultstring = minimalmodbus._bytestringToTextstring(bytestring, numberOfRegisters)
self.assertEqual(resultstring.strip(), knownstring)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, 'A', 1)
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, '', 1)
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, '', 0)
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, 'ABC', 1)
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, 'AB', 0)
self.assertRaises(ValueError, minimalmodbus._bytestringToTextstring, 'AB', -1)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._bytestringToTextstring, value, 1)
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._bytestringToTextstring, 'AB', value)
class TestSanityTextstring(ExtendedTestCase):
knownValues=TestTextstringToBytestring.knownValues
def testSanity(self):
for textstring, numberOfRegisters, bytestring in self.knownValues:
resultstring = minimalmodbus._bytestringToTextstring( \
minimalmodbus._textstringToBytestring(textstring, numberOfRegisters), numberOfRegisters)
self.assertEqual( resultstring.strip(), textstring )
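# Added illustration: a text string is space-padded on the right so that it
# fills exactly numberOfRegisters * 2 bytes (two characters per register),
# which is why the round-trip tests above compare with .strip(). A minimal
# sketch:
def _example_textstring_to_bytestring(textstring, numberOfRegisters):
    return textstring.ljust(numberOfRegisters * 2)
# For example _example_textstring_to_bytestring('ABC', 2) gives 'ABC '.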
class TestPack(ExtendedTestCase):
knownValues=[
(-77, '>h', '\xff\xb3'), # (Signed) short (2 bytes)
(-1, '>h', '\xff\xff'),
(-770, '>h', '\xfc\xfe'),
(-32768, '>h', '\x80\x00'),
(32767, '>h', '\x7f\xff'),
(770, '>H', '\x03\x02'), # Unsigned short (2 bytes)
(65535, '>H', '\xff\xff'),
(75000, '>l', '\x00\x01\x24\xf8'), # (Signed) long (4 bytes)
(-1, '>l', '\xff\xff\xff\xff'),
(-2147483648, '>l', '\x80\x00\x00\x00'),
(-200000000, '>l', '\xf4\x14\x3e\x00'),
(1, '>L', '\x00\x00\x00\x01'), # Unsigned long (4 bytes)
(75000, '>L', '\x00\x01\x24\xf8'),
(2147483648, '>L', '\x80\x00\x00\x00'),
(2147483647, '>L', '\x7f\xff\xff\xff'),
(1.0, '>f', '\x3f\x80\x00\x00'), # Float (4 bytes)
(1.0e5, '>f', '\x47\xc3\x50\x00'),
(1.0e16, '>f', '\x5a\x0e\x1b\xca'),
(3.65e30, '>f', '\x72\x38\x47\x25'),
(-2, '>f', '\xc0\x00\x00\x00'),
(-3.6e30, '>f', '\xf2\x35\xc0\xe9'),
(1.0, '>d', '\x3f\xf0\x00\x00\x00\x00\x00\x00'), # Double (8 bytes)
(2, '>d', '\x40\x00\x00\x00\x00\x00\x00\x00'),
(1.1e9, '>d', '\x41\xd0\x64\x2a\xc0\x00\x00\x00'),
(3.65e30, '>d', '\x46\x47\x08\xe4\x9e\x2f\x4d\x62'),
(2.42e300, '>d', '\x7e\x4c\xe8\xa5\x67\x1f\x46\xa0'),
(-1.1, '>d', '\xbf\xf1\x99\x99\x99\x99\x99\x9a'),
(-2, '>d', '\xc0\x00\x00\x00\x00\x00\x00\x00'),
]
def testKnownValues(self):
for value, formatstring, knownstring in self.knownValues:
resultstring = minimalmodbus._pack(formatstring, value)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._pack, 'ABC', 35)
self.assertRaises(ValueError, minimalmodbus._pack, '', 35)
if _runTestsForNewVersion: # For Python2.6 compatibility
self.assertRaises(ValueError, minimalmodbus._pack, '>H', -35)
self.assertRaises(ValueError, minimalmodbus._pack, '>L', -35)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._pack, value, 1)
for value in ['1', ['1'], [1], ['\x00\x2d\x00\x58'], ['A', 'B', 'C'], 'ABC']:
self.assertRaises(ValueError, minimalmodbus._pack, '>h', value)
class TestUnpack(ExtendedTestCase):
knownValues=TestPack.knownValues
def testKnownValues(self):
for knownvalue, formatstring, bytestring in self.knownValues:
resultvalue = minimalmodbus._unpack(formatstring, bytestring)
self.assertAlmostEqualRatio(resultvalue, knownvalue)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._unpack, 'ABC', '\xff\xb3')
self.assertRaises(ValueError, minimalmodbus._unpack, '', '\xff\xb3')
self.assertRaises(ValueError, minimalmodbus._unpack, '>h', '')
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._unpack, value, '\xff\xb3')
self.assertRaises(TypeError, minimalmodbus._unpack, '>h', value)
class TestSanityPackUnpack(ExtendedTestCase):
knownValues=TestPack.knownValues
def testSanity(self):
for value, formatstring, bytestring in self.knownValues:
resultstring = minimalmodbus._pack(formatstring, minimalmodbus._unpack(formatstring, bytestring))
self.assertEqual(resultstring, bytestring)
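# Added note: the format codes above ('>h', '>H', '>l', '>L', '>f', '>d') are
# standard struct-module codes, so _pack and _unpack act as error-checked
# wrappers around struct.pack and struct.unpack (an inference from the test
# values, not a statement about the exact implementation).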
class TestHexencode(ExtendedTestCase):
knownValues=[
('', False, ''),
('7', False, '37'),
('J', False, '4A'),
('\x5d', False, '5D'),
('\x04', False, '04'),
        ('\x04\x5d', False, '045D'),
('mn', False, '6D6E'),
('Katt1', False, '4B61747431'),
('', True, ''),
('7', True, '37'),
('J', True, '4A'),
('\x5d', True, '5D'),
('\x04', True, '04'),
        ('\x04\x5d', True, '04 5D'),
('mn', True, '6D 6E'),
('Katt1', True, '4B 61 74 74 31'),
]
def testKnownValues(self):
for value, insert_spaces, knownstring in self.knownValues:
resultstring = minimalmodbus._hexencode(value, insert_spaces)
self.assertEqual(resultstring, knownstring)
def testWrongInputValue(self):
        pass  # No input string values are invalid for _hexencode
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._hexencode, value)
class TestHexdecode(ExtendedTestCase):
knownValues=TestHexencode.knownValues
def testKnownValues(self):
for knownstring, insert_spaces, value in self.knownValues:
if not insert_spaces:
resultstring = minimalmodbus._hexdecode(value)
self.assertEqual(resultstring, knownstring)
self.assertEqual(minimalmodbus._hexdecode('4A'), 'J')
self.assertEqual(minimalmodbus._hexdecode('4a'), 'J')
def testAllowLowercase(self):
minimalmodbus._hexdecode('Aa')
minimalmodbus._hexdecode('aa23')
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._hexdecode, 'A')
self.assertRaises(ValueError, minimalmodbus._hexdecode, 'AAA')
        self.assertRaises(TypeError, minimalmodbus._hexdecode, 'AG')  # Non-hex characters give TypeError (not ValueError)
def testWrongInputType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._hexdecode, value)
class TestSanityHexencodeHexdecode(ExtendedTestCase):
knownValues=TestHexencode.knownValues
def testKnownValues(self):
for value, insert_spaces, knownstring in self.knownValues:
if not insert_spaces:
resultstring = minimalmodbus._hexdecode(minimalmodbus._hexencode(value))
self.assertEqual(resultstring, value)
def testKnownValuesLoop(self):
"""Loop through all bytestrings of length two."""
if ALSO_TIME_CONSUMING_TESTS:
RANGE_VALUE = 256
for i in range(RANGE_VALUE):
for j in range(RANGE_VALUE):
bytestring = chr(i) + chr(j)
resultstring = minimalmodbus._hexdecode(minimalmodbus._hexencode(bytestring))
self.assertEqual(resultstring, bytestring)
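# Added illustration: hexencoding maps each byte to two uppercase hex digits,
# optionally space-separated. A minimal sketch (using %-formatting so it also
# runs on Python 2.6):
def _example_hexencode(bytestring, insert_spaces=False):
    separator = ' ' if insert_spaces else ''
    return separator.join(['%02X' % ord(character) for character in bytestring])
# For example _example_hexencode('\x04\x5d', True) gives '04 5D'.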
class TestBitResponseToValue(ExtendedTestCase):
def testKnownValues(self):
self.assertEqual(minimalmodbus._bitResponseToValue('\x00'), 0)
self.assertEqual(minimalmodbus._bitResponseToValue('\x01'), 1)
def testWrongValues(self):
self.assertRaises(ValueError, minimalmodbus._bitResponseToValue, 'ABC') # Too long string
self.assertRaises(ValueError, minimalmodbus._bitResponseToValue, 'A') # Wrong string
self.assertRaises(ValueError, minimalmodbus._bitResponseToValue, '\x03') # Wrong string
def testWrongType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._bitResponseToValue, value)
class TestCreateBitPattern(ExtendedTestCase):
knownValues=[
(5, 0, '\x00\x00'),
        (5, 1, '\xff\x00'),
(15, 0, '\x00'),
(15, 1, '\x01'),
]
def testKnownValues(self):
for functioncode, value, knownresult in self.knownValues:
resultvalue = minimalmodbus._createBitpattern(functioncode, value)
self.assertEqual(resultvalue, knownresult)
def testWrongFunctionCode(self):
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 16, 1)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, -1, 1)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 128, 1)
def testFunctionCodeNotInteger(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._createBitpattern, value, 1)
def testWrongValue(self):
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 5, 2)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 5, 222)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 5, -1)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 15, 2)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 15, 222)
self.assertRaises(ValueError, minimalmodbus._createBitpattern, 15, -1)
def testValueNotInteger(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._createBitpattern, 5, value)
self.assertRaises(TypeError, minimalmodbus._createBitpattern, 15, value)
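# Added illustration: function code 5 (write single coil) encodes ON as
# 0xFF00 and OFF as 0x0000, while function code 15 (write multiple coils)
# packs one bit per coil, so a single coil occupies one byte. A minimal
# sketch reproducing the knownValues above:
def _example_create_bitpattern(functioncode, value):
    if functioncode == 5:
        return '\xff\x00' if value == 1 else '\x00\x00'
    return chr(value)  # functioncode 15, single coil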
############################
# Test number manipulation #
############################
class TestTwosComplement(ExtendedTestCase):
knownValues=[
(0, 8, 0),
(1, 8, 1),
(127, 8, 127),
(-128, 8, 128),
(-127, 8, 129),
(-1, 8, 255),
(0, 16, 0),
(1, 16, 1),
(32767, 16, 32767),
(-32768, 16, 32768),
(-32767, 16, 32769),
(-1, 16, 65535),
]
def testKnownValues(self):
for x, bits, knownresult in self.knownValues:
result = minimalmodbus._twosComplement(x, bits)
self.assertEqual(result, knownresult)
def testOutOfRange(self):
self.assertRaises(ValueError, minimalmodbus._twosComplement, 128, 8)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1000000, 8)
self.assertRaises(ValueError, minimalmodbus._twosComplement, -129, 8)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 32768, 16)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1000000, 16)
self.assertRaises(ValueError, minimalmodbus._twosComplement, -32769, 16)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1, 0)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1, -1)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1, -2)
self.assertRaises(ValueError, minimalmodbus._twosComplement, 1, -100)
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._twosComplement, value, 8)
class TestFromTwosComplement(ExtendedTestCase):
knownValues=TestTwosComplement.knownValues
def testKnownValues(self):
for knownresult, bits, x in self.knownValues:
result = minimalmodbus._fromTwosComplement(x, bits)
self.assertEqual(result, knownresult)
def testOutOfRange(self):
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 256, 8)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1000000, 8)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, -1, 8)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 65536, 16)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1000000, 16)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, -1, 16)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1, 0)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1, -1)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1, -2)
self.assertRaises(ValueError, minimalmodbus._fromTwosComplement, 1, -100)
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._fromTwosComplement, value, 8)
self.assertRaises(TypeError, minimalmodbus._fromTwosComplement, 1, value)
class TestSanityTwosComplement(ExtendedTestCase):
knownValues = [1, 2, 4, 8, 12, 16]
def testSanity(self):
if ALSO_TIME_CONSUMING_TESTS:
for bits in self.knownValues:
for x in range(2**bits):
resultvalue = minimalmodbus._twosComplement( minimalmodbus._fromTwosComplement(x, bits), bits )
self.assertEqual(resultvalue, x)
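# Added illustration: in two's complement, negative values map onto the upper
# half of the unsigned range (e.g. -1 -> 2**bits - 1). A minimal sketch of the
# conversions tested above, without the range checking that the real functions
# perform:
def _example_twos_complement(x, bits):
    return x % (2 ** bits)
def _example_from_twos_complement(x, bits):
    return x - 2 ** bits if x >= 2 ** (bits - 1) else x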
#########################
# Test bit manipulation #
#########################
class TestSetBitOn(ExtendedTestCase):
knownValues=[
        (4, 0, 5),
        (4, 1, 6),
        (1, 1, 3),
]
def testKnownValues(self):
for x, bitnum, knownresult in self.knownValues:
result = minimalmodbus._setBitOn(x, bitnum)
self.assertEqual(result, knownresult)
def testWrongInputValue(self):
self.assertRaises(ValueError, minimalmodbus._setBitOn, 1, -1)
self.assertRaises(ValueError, minimalmodbus._setBitOn, -2, 1)
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._setBitOn, value, 1)
self.assertRaises(TypeError, minimalmodbus._setBitOn, 1, value)
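# Added note: setting a bit is a plain bitwise OR with a shifted one, i.e.
# x | (1 << bitnum); for example 4 | (1 << 1) == 6, matching the knownValues
# above.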
############################
# Error checking functions #
############################
class TestCalculateCrcString(ExtendedTestCase):
knownValues=[
        ('\x02\x07', '\x41\x12'),  # Example from MODBUS over Serial Line Specification and Implementation Guide V1.02
('ABCDE', '\x0fP'),
]
def testKnownValues(self):
for inputstring, knownresult in self.knownValues:
resultstring = minimalmodbus._calculateCrcString(inputstring)
self.assertEqual(resultstring, knownresult)
def testCalculationTime(self):
teststrings = [minimalmodbus._numToTwoByteString(i) for i in range(2**16)]
minimalmodbus._print_out("\n\n Measuring CRC calculation time. Running {} calculations ...".format(
len(teststrings)))
start_time = time.time()
for teststring in teststrings:
minimalmodbus._calculateCrcString(teststring)
calculation_time = time.time() - start_time
minimalmodbus._print_out("CRC calculation time: {} calculations took {:.3f} s ({} s per calculation)\n\n".format(
len(teststrings), calculation_time, calculation_time/float(len(teststrings))))
def testNotStringInput(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._calculateCrcString, value)
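# Added illustration: the CRC tested above is the standard CRC-16/Modbus
# (initial value 0xFFFF, reflected polynomial 0xA001, result appended low
# byte first). A minimal bit-by-bit sketch; the library may well use a
# faster table-driven variant:
def _example_crc16_modbus(inputstring):
    register = 0xFFFF
    for character in inputstring:
        register ^= ord(character)
        for _ in range(8):
            if register & 1:
                register = (register >> 1) ^ 0xA001
            else:
                register >>= 1
    return chr(register & 0xFF) + chr(register >> 8)  # Low byte first
# For example _example_crc16_modbus('\x02\x07') gives '\x41\x12'.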
class TestCalculateLrcString(ExtendedTestCase):
knownValues=[
('ABCDE', '\xb1'),
        ('\x02\x30\x30\x31\x23\x03', '\x47'),  # From C# example on http://en.wikipedia.org/wiki/Longitudinal_redundancy_check
]
def testKnownValues(self):
for inputstring, knownresult in self.knownValues:
resultstring = minimalmodbus._calculateLrcString(inputstring)
self.assertEqual(resultstring, knownresult)
def testNotStringInput(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._calculateLrcString, value)
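# Added illustration: the LRC used in Modbus ASCII mode is the 8-bit two's
# complement of the byte sum. A minimal sketch:
def _example_lrc(inputstring):
    bytesum = sum(ord(character) for character in inputstring)
    return chr((-bytesum) & 0xFF)
# For example _example_lrc('ABCDE') gives '\xb1' (byte sum 335, i.e. 0x4F
# modulo 256, whose two's complement is 0xB1).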
class TestCheckFunctioncode(ExtendedTestCase):
def testCorrectFunctioncode(self):
minimalmodbus._checkFunctioncode( 4, [4, 5] )
def testCorrectFunctioncodeNoRange(self):
minimalmodbus._checkFunctioncode( 4, None )
minimalmodbus._checkFunctioncode( 75, None )
def testWrongFunctioncode(self):
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 3, [4, 5])
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 3, [])
def testWrongFunctioncodeNoRange(self):
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 1000, None)
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, -1, None)
def testWrongFunctioncodeType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, value, [4, 5])
def testWrongFunctioncodeListValues(self):
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, -1, [-1, 5])
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 128, [4, 128])
def testWrongListType(self):
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, 4)
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, 'ABC')
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, (4, 5))
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 4, [4, -23])
self.assertRaises(ValueError, minimalmodbus._checkFunctioncode, 4, [4, 128])
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, [4, '5'])
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, [4, None])
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, [4, [5]])
self.assertRaises(TypeError, minimalmodbus._checkFunctioncode, 4, [4.0, 5])
class TestCheckSlaveaddress(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkSlaveaddress( 0 )
minimalmodbus._checkSlaveaddress( 1 )
minimalmodbus._checkSlaveaddress( 10 )
minimalmodbus._checkSlaveaddress( 247 )
def testWrongValues(self):
self.assertRaises(ValueError, minimalmodbus._checkSlaveaddress, -1)
self.assertRaises(ValueError, minimalmodbus._checkSlaveaddress, 248)
def testNotIntegerInput(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkSlaveaddress, value)
class TestCheckMode(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkMode('ascii')
minimalmodbus._checkMode('rtu')
def testWrongValues(self):
self.assertRaises(ValueError, minimalmodbus._checkMode, 'asc')
self.assertRaises(ValueError, minimalmodbus._checkMode, 'ASCII')
self.assertRaises(ValueError, minimalmodbus._checkMode, 'RTU')
self.assertRaises(ValueError, minimalmodbus._checkMode, '')
self.assertRaises(ValueError, minimalmodbus._checkMode, 'ascii ')
self.assertRaises(ValueError, minimalmodbus._checkMode, ' rtu')
    def testNotStringInput(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkMode, value)
class TestCheckRegisteraddress(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkRegisteraddress( 0 )
minimalmodbus._checkRegisteraddress( 1 )
minimalmodbus._checkRegisteraddress( 10 )
minimalmodbus._checkRegisteraddress( 65535 )
def testWrongValues(self):
self.assertRaises(ValueError, minimalmodbus._checkRegisteraddress, -1)
self.assertRaises(ValueError, minimalmodbus._checkRegisteraddress, 65536)
def testWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkRegisteraddress, value)
class TestCheckResponseNumberOfBytes(ExtendedTestCase):
def testCorrectNumberOfBytes(self):
minimalmodbus._checkResponseByteCount('\x02\x03\x02')
def testWrongNumberOfBytes(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseByteCount, '\x03\x03\x02')
self.assertRaises(ValueError, minimalmodbus._checkResponseByteCount, 'ABC')
def testNotStringInput(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkResponseByteCount, value)
class TestCheckResponseRegisterAddress(ExtendedTestCase):
def testCorrectResponseRegisterAddress(self):
minimalmodbus._checkResponseRegisterAddress( '\x00\x2d\x00\x58', 45)
minimalmodbus._checkResponseRegisterAddress( '\x00\x18\x00\x01', 24)
minimalmodbus._checkResponseRegisterAddress( '\x00\x47\xff\x00', 71)
minimalmodbus._checkResponseRegisterAddress( '\x00\x48\x00\x01', 72)
def testWrongResponseRegisterAddress(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseRegisterAddress, '\x00\x2d\x00\x58', 46)
def testTooShortString(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseRegisterAddress, '\x00', 46)
def testNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkResponseRegisterAddress, value, 45)
def testWrongAddress(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseRegisterAddress, '\x00\x2d\x00\x58', -2)
self.assertRaises(ValueError, minimalmodbus._checkResponseRegisterAddress, '\x00\x2d\x00\x58', 65536)
def testAddressNotInteger(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkResponseRegisterAddress, '\x00\x2d\x00\x58', value)
class TestCheckResponseNumberOfRegisters(ExtendedTestCase):
def testCorrectResponseNumberOfRegisters(self):
minimalmodbus._checkResponseNumberOfRegisters( '\x00\x18\x00\x01', 1 )
minimalmodbus._checkResponseNumberOfRegisters( '\x00#\x00\x01', 1 )
minimalmodbus._checkResponseNumberOfRegisters( '\x00\x34\x00\x02', 2 )
def testWrongResponseNumberOfRegisters(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseNumberOfRegisters, '\x00#\x00\x01', 4 )
def testTooShortString(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseNumberOfRegisters, '\x00', 1 )
def testNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkResponseNumberOfRegisters, value, 1 )
def testWrongResponseNumberOfRegistersRange(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseNumberOfRegisters, '\x00\x18\x00\x00', 0 )
self.assertRaises(ValueError, minimalmodbus._checkResponseNumberOfRegisters, '\x00\x18\x00\x01', -1 )
self.assertRaises(ValueError, minimalmodbus._checkResponseNumberOfRegisters, '\x00\x18\x00\x01', 65536 )
def testNumberOfRegistersNotInteger(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkResponseNumberOfRegisters, '\x00\x18\x00\x01', value )
class TestCheckResponseWriteData(ExtendedTestCase):
def testCorrectResponseWritedata(self):
minimalmodbus._checkResponseWriteData('\x00\x2d\x00\x58', '\x00\x58')
minimalmodbus._checkResponseWriteData('\x00\x2d\x00\x58', minimalmodbus._numToTwoByteString(88))
minimalmodbus._checkResponseWriteData('\x00\x47\xff\x00', '\xff\x00')
minimalmodbus._checkResponseWriteData('\x00\x47\xff\x00', minimalmodbus._numToTwoByteString(65280))
minimalmodbus._checkResponseWriteData('\x00\x2d\x00\x58ABCDEFGHIJKLMNOP', '\x00\x58')
def testWrongResponseWritedata(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', '\x00\x59')
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', minimalmodbus._numToTwoByteString(89))
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x47\xff\x00', '\xff\x01')
def testNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkResponseWriteData, value, '\x00\x58')
self.assertRaises(TypeError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', value)
def testTooShortString(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x58', '\x00\x58')
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '', '\x00\x58')
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', '\x58')
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', '')
def testTooLongString(self):
self.assertRaises(ValueError, minimalmodbus._checkResponseWriteData, '\x00\x2d\x00\x58', '\x00\x58\x00')
class TestCheckString(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkString('DEF', minlength=3, maxlength=3, description='ABC' )
minimalmodbus._checkString('DEF', minlength=0, maxlength=100, description='ABC' )
def testTooShort(self):
self.assertRaises(ValueError, minimalmodbus._checkString, 'DE', minlength=3, maxlength=3, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkString, 'DEF', minlength=10, maxlength=3, description='ABC')
def testTooLong(self):
self.assertRaises(ValueError, minimalmodbus._checkString, 'DEFG', minlength=1, maxlength=3, description='ABC')
def testInconsistentLengthlimits(self):
self.assertRaises(ValueError, minimalmodbus._checkString, 'DEFG', minlength=4, maxlength=3, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkString, 'DEF', minlength=-3, maxlength=3, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkString, 'DEF', minlength=3, maxlength=-3, description='ABC')
def testInputNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkString, value, minlength=3, maxlength=3, description='ABC')
def testNotIntegerInput(self):
for value in _NOT_INTERGERS_OR_NONE:
self.assertRaises(TypeError, minimalmodbus._checkString, 'DEF', minlength=value, maxlength=3, description='ABC')
self.assertRaises(TypeError, minimalmodbus._checkString, 'DEF', minlength=3, maxlength=value, description='ABC')
def testDescriptionNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkString, 'DEF', minlength=3, maxlength=3, description=value)
class TestCheckInt(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkInt(47, minvalue=None, maxvalue=None, description='ABC')
minimalmodbus._checkInt(47, minvalue=40, maxvalue=50, description='ABC')
minimalmodbus._checkInt(47, minvalue=-40, maxvalue=50, description='ABC')
minimalmodbus._checkInt(47, description='ABC', maxvalue=50, minvalue=40)
minimalmodbus._checkInt(47, minvalue=None, maxvalue=50, description='ABC')
minimalmodbus._checkInt(47, minvalue=40, maxvalue=None, description='ABC')
def testTooLargeValue(self):
self.assertRaises(ValueError, minimalmodbus._checkInt, 47, minvalue=30, maxvalue=40, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkInt, 47, maxvalue=46)
def testTooSmallValue(self):
self.assertRaises(ValueError, minimalmodbus._checkInt, 47, minvalue=48)
self.assertRaises(ValueError, minimalmodbus._checkInt, 47, minvalue=48, maxvalue=None, description='ABC')
def testInconsistentLimits(self):
self.assertRaises(ValueError, minimalmodbus._checkInt, 47, minvalue=47, maxvalue=45, description='ABC')
def testWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, minimalmodbus._checkInt, value, minvalue=40)
for value in _NOT_INTERGERS_OR_NONE:
self.assertRaises(TypeError, minimalmodbus._checkInt, 47, minvalue=value, maxvalue=50, description='ABC')
self.assertRaises(TypeError, minimalmodbus._checkInt, 47, minvalue=40, maxvalue=value, description='ABC')
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkInt, 47, minvalue=40, maxvalue=50, description=value)
class TestCheckNumerical(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkNumerical(47, minvalue=None, maxvalue=None, description='ABC')
minimalmodbus._checkNumerical(47, minvalue=40, maxvalue=50, description='ABC')
minimalmodbus._checkNumerical(47, minvalue=-40, maxvalue=50, description='ABC')
minimalmodbus._checkNumerical(47, description='ABC', maxvalue=50, minvalue=40)
minimalmodbus._checkNumerical(47, minvalue=None, maxvalue=50, description='ABC')
minimalmodbus._checkNumerical(47, minvalue=40, maxvalue=None, description='ABC')
minimalmodbus._checkNumerical(47.0, minvalue=40)
minimalmodbus._checkNumerical(47, minvalue=40.0, maxvalue=50, description='ABC')
minimalmodbus._checkNumerical(47.0, minvalue=40, maxvalue=None, description='ABC' )
minimalmodbus._checkNumerical(47.0, minvalue=40.0, maxvalue=50.0, description='ABC' )
def testTooLargeValue(self):
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, minvalue=30, maxvalue=40, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, minvalue=30.0, maxvalue=40.0, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47, maxvalue=46.0)
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, maxvalue=46.0)
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, maxvalue=46)
def testTooSmallValue(self):
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, minvalue=48)
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, minvalue=48.0)
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47, minvalue=48.0)
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47, minvalue=48, maxvalue=None, description='ABC')
def testInconsistentLimits(self):
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47, minvalue=47, maxvalue=45, description='ABC')
self.assertRaises(ValueError, minimalmodbus._checkNumerical, 47.0, minvalue=47.0, maxvalue=45.0, description='ABC')
def testNotNumericInput(self):
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, minimalmodbus._checkNumerical, value, minvalue=40.0)
for value in _NOT_NUMERICALS_OR_NONE:
self.assertRaises(TypeError, minimalmodbus._checkNumerical, 47.0, minvalue=value, maxvalue=50.0, description='ABC')
self.assertRaises(TypeError, minimalmodbus._checkNumerical, 47.0, minvalue=40.0, maxvalue=value, description='ABC')
def testDescriptionNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkNumerical, 47.0, minvalue=40, maxvalue=50, description=value)
class TestCheckBool(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._checkBool(True, description='ABC')
minimalmodbus._checkBool(False, description='ABC')
def testWrongType(self):
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, minimalmodbus._checkBool, value, description='ABC')
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._checkBool, True, description=value)
#####################
# Development tools #
#####################
class TestGetDiagnosticString(ExtendedTestCase):
def testReturnsString(self):
resultstring = minimalmodbus._getDiagnosticString()
self.assertTrue( len(resultstring) > 100) # For Python 2.6 compatibility
class TestPrintOut(ExtendedTestCase):
def testKnownValues(self):
minimalmodbus._print_out('ABCDEFGHIJKL')
def testInputNotString(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, minimalmodbus._print_out, value)
# TODO: TestInterpretRawMessage
# TODO: TestInterpretPayload
###########################################
# Communication using a dummy serial port #
###########################################
class TestDummyCommunication(ExtendedTestCase):
## Test fixture ##
def setUp(self):
# Prepare a dummy serial port to have proper responses
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
        # Monkey-patch a dummy serial port for testing purposes
minimalmodbus.serial.Serial = dummy_serial.Serial
# Initialize a (dummy) instrument
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1, minimalmodbus.MODE_RTU) # port name, slave address (in decimal)
self.instrument.debug = False
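    # Added note: with serial.Serial monkey-patched to dummy_serial.Serial,
    # each request written by the instrument is presumably looked up in the
    # RTU_RESPONSES dictionary and the canned response is returned (with
    # DEFAULT_RESPONSE as the fallback), so the tests below run without any
    # hardware attached.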
## Read bit ##
def testReadBit(self):
self.assertEqual( self.instrument.read_bit(61), 1 ) # Functioncode 2
self.assertEqual( self.instrument.read_bit(61, functioncode=2), 1 )
self.assertEqual( self.instrument.read_bit(61, 2), 1 )
self.assertEqual( self.instrument.read_bit(62, functioncode=1), 0 ) # Functioncode 1
self.assertEqual( self.instrument.read_bit(62, 1), 0 )
def testReadBitWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_bit, -1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_bit, 65536)
self.assertRaises(ValueError, self.instrument.read_bit, 62, 0) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_bit, 62, -1)
self.assertRaises(ValueError, self.instrument.read_bit, 62, 128)
def testReadBitWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_bit, value)
self.assertRaises(TypeError, self.instrument.read_bit, 62, value)
def testReadBitWithWrongByteCountResponse(self):
self.assertRaises(ValueError, self.instrument.read_bit, 63) # Functioncode 2. Slave gives wrong byte count.
def testReadBitWithNoResponse(self):
self.assertRaises(IOError, self.instrument.read_bit, 64) # Functioncode 2. Slave gives no response.
## Write bit ##
def testWriteBit(self):
self.instrument.write_bit(71, 1)
self.instrument.write_bit(71, 1, 5)
self.instrument.write_bit(71, 1, functioncode=5)
self.instrument.write_bit(72, 1, 15)
self.instrument.write_bit(72, 1, functioncode=15)
def testWriteBitWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_bit, 65536, 1) # Wrong register address
self.assertRaises(ValueError, self.instrument.write_bit, -1, 1)
self.assertRaises(ValueError, self.instrument.write_bit, 71, 10) # Wrong bit value
self.assertRaises(ValueError, self.instrument.write_bit, 71, -5)
self.assertRaises(ValueError, self.instrument.write_bit, 71, 10, 5)
self.assertRaises(ValueError, self.instrument.write_bit, 71, 1, 6) # Wrong function code
self.assertRaises(ValueError, self.instrument.write_bit, 71, 1, -1)
self.assertRaises(ValueError, self.instrument.write_bit, 71, 1, 0)
self.assertRaises(ValueError, self.instrument.write_bit, 71, 1, 128)
def testWriteBitWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.write_bit, value, 1)
self.assertRaises(TypeError, self.instrument.write_bit, 71, value)
self.assertRaises(TypeError, self.instrument.write_bit, 71, 1, value)
def testWriteBitWithWrongRegisternumbersResponse(self):
self.assertRaises(ValueError, self.instrument.write_bit, 73, 1, functioncode=15) # Slave gives wrong number of registers
def testWriteBitWithWrongWritedataResponse(self):
self.assertRaises(ValueError, self.instrument.write_bit, 74, 1) # Slave gives wrong write data
## Read register ##
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(289), 770)
self.assertEqual( self.instrument.read_register(5), 184)
self.assertEqual( self.instrument.read_register(289, 0), 770)
self.assertEqual( self.instrument.read_register(289, 0, 3), 770) # functioncode 3
self.assertEqual( self.instrument.read_register(14, 0, 4), 880) # functioncode 4
self.assertAlmostEqual( self.instrument.read_register(289, 1), 77.0)
self.assertAlmostEqual( self.instrument.read_register(289, 2), 7.7)
self.assertEqual( self.instrument.read_register(101), 65531)
self.assertEqual( self.instrument.read_register(101, signed=True), -5)
def testReadRegisterWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_register, -1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_register, -1, 0, 3)
self.assertRaises(ValueError, self.instrument.read_register, 65536)
self.assertRaises(ValueError, self.instrument.read_register, 289, -1) # Wrong number of decimals
self.assertRaises(ValueError, self.instrument.read_register, 289, 100)
self.assertRaises(ValueError, self.instrument.read_register, 289, 0, 5) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_register, 289, 0, -4)
def testReadRegisterWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_register, value, 0, 3)
self.assertRaises(TypeError, self.instrument.read_register, 289, value)
self.assertRaises(TypeError, self.instrument.read_register, 289, 0, value)
## Write register ##
def testWriteRegister(self):
self.instrument.write_register(35, 20)
self.instrument.write_register(35, 20, functioncode = 16)
self.instrument.write_register(35, 20.0)
self.instrument.write_register(24, 50)
self.instrument.write_register(45, 88, functioncode = 6)
self.instrument.write_register(101, 5)
self.instrument.write_register(101, 5, signed=True)
self.instrument.write_register(101, 5, 1)
self.instrument.write_register(101, -5, signed=True)
self.instrument.write_register(101, -5, 1, signed=True)
def testWriteRegisterWithDecimals(self):
self.instrument.write_register(35, 2.0, 1)
self.instrument.write_register(45, 8.8, 1, functioncode = 6)
def testWriteRegisterWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_register, -1, 20) # Wrong address
self.assertRaises(ValueError, self.instrument.write_register, 65536, 20)
self.assertRaises(ValueError, self.instrument.write_register, 35, -1) # Wrong register value
self.assertRaises(ValueError, self.instrument.write_register, 35, 65536)
self.assertRaises(ValueError, self.instrument.write_register, 35, 20, -1) # Wrong number of decimals
self.assertRaises(ValueError, self.instrument.write_register, 35, 20, 100)
self.assertRaises(ValueError, self.instrument.write_register, 35, 20, functioncode = 12 ) # Wrong function code
self.assertRaises(ValueError, self.instrument.write_register, 35, 20, functioncode = -4 )
self.assertRaises(ValueError, self.instrument.write_register, 35, 20, functioncode = 129 )
def testWriteRegisterWrongType(self):
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, self.instrument.write_register, value, 20)
self.assertRaises(TypeError, self.instrument.write_register, 35, value)
self.assertRaises(TypeError, self.instrument.write_register, 35, 20, value)
self.assertRaises(TypeError, self.instrument.write_register, 35, 20, functioncode = value)
def testWriteRegisterWithWrongCrcResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 51, 99) # Slave gives wrong CRC
def testWriteRegisterSuppressErrorMessageAtWrongCRC(self):
try:
self.instrument.write_register(51, 99) # Slave gives wrong CRC
except ValueError:
minimalmodbus._print_out('Minimalmodbus: An error was suppressed.')
def testWriteRegisterWithWrongSlaveaddressResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 54, 99) # Slave gives wrong slaveaddress
def testWriteRegisterWithWrongFunctioncodeResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 55, 99) # Slave gives wrong functioncode
self.assertRaises(ValueError, self.instrument.write_register, 56, 99) # Slave indicates an error
def testWriteRegisterWithWrongRegisteraddressResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 53, 99) # Slave gives wrong registeraddress
def testWriteRegisterWithWrongRegisternumbersResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 52, 99) # Slave gives wrong number of registers
def testWriteRegisterWithWrongWritedataResponse(self):
self.assertRaises(ValueError, self.instrument.write_register, 55, 99, functioncode = 6) # Functioncode 6. Slave gives wrong write data.
## Read Long ##
def testReadLong(self):
self.assertEqual( self.instrument.read_long(102), 4294967295)
self.assertEqual( self.instrument.read_long(102, signed=True), -1)
def testReadLongWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_long, -1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_long, 65536)
self.assertRaises(ValueError, self.instrument.read_long, 102, 1) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_long, 102, -1)
self.assertRaises(ValueError, self.instrument.read_long, 102, 256)
def testReadLongWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_long, value)
self.assertRaises(TypeError, self.instrument.read_long, 102, value)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, self.instrument.read_long, 102, signed=value)
## Write Long ##
def testWriteLong(self):
self.instrument.write_long(102, 5)
self.instrument.write_long(102, 5, signed=True)
self.instrument.write_long(102, -5, signed=True)
self.instrument.write_long(102, 3, False)
self.instrument.write_long(102, -3, True)
def testWriteLongWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_long, -1, 5) # Wrong register address
self.assertRaises(ValueError, self.instrument.write_long, 65536, 5)
self.assertRaises(ValueError, self.instrument.write_long, 102, 888888888888888888888) # Wrong value to write to slave
if _runTestsForNewVersion: # For Python2.6 compatibility
self.assertRaises(ValueError, self.instrument.write_long, 102, -5, signed=False) # Wrong value to write to slave
def testWriteLongWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.write_long, value, 5)
self.assertRaises(TypeError, self.instrument.write_long, 102, value)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, self.instrument.write_long, 102, 5, signed=value)
## Read Float ##
def testReadFloat(self):
self.assertEqual( self.instrument.read_float(103), 1.0 )
self.assertEqual( self.instrument.read_float(103, 3), 1.0 )
self.assertEqual( self.instrument.read_float(103, 3, 2), 1.0 )
self.assertEqual( self.instrument.read_float(103, 3, 4), -2.0 )
self.assertAlmostEqualRatio( self.instrument.read_float(103, 4, 2), 3.65e30 ) # Function code 4
def testReadFloatWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_float, -1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_float, -1, 3)
self.assertRaises(ValueError, self.instrument.read_float, -1, 3, 2)
self.assertRaises(ValueError, self.instrument.read_float, 65536)
self.assertRaises(ValueError, self.instrument.read_float, 103, 1) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_float, 103, -1)
self.assertRaises(ValueError, self.instrument.read_float, 103, 256)
for value in [-1, 0, 1, 3, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, self.instrument.read_float, 103, 3, value) # Wrong number of registers
def testReadFloatWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_float, value, 3, 2)
self.assertRaises(TypeError, self.instrument.read_float, 103, value, 2)
self.assertRaises(TypeError, self.instrument.read_float, 103, 3, value)
## Write Float ##
def testWriteFloat(self):
self.instrument.write_float(103, 1.1)
self.instrument.write_float(103, 1.1, 4)
def testWriteFloatWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_float, -1, 1.1) # Wrong register address
self.assertRaises(ValueError, self.instrument.write_float, 65536, 1.1)
for value in [-1, 0, 1, 3, 5, 6, 7, 8, 16]:
self.assertRaises(ValueError, self.instrument.write_float, 103, 1.1, value) # Wrong number of registers
def testWriteFloatWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.write_float, value, 1.1)
self.assertRaises(TypeError, self.instrument.write_float, 103, 1.1, value)
for value in _NOT_NUMERICALS:
self.assertRaises(TypeError, self.instrument.write_float, 103, value)
## Read String ##
def testReadString(self):
self.assertEqual( self.instrument.read_string(104, 1), 'AB')
self.assertEqual( self.instrument.read_string(104, 4), 'ABCDEFGH')
self.assertEqual( self.instrument.read_string(104, 4, 3), 'ABCDEFGH')
def testReadStringWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_string, -1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_string, 65536)
self.assertRaises(ValueError, self.instrument.read_string, 104, -1) # Wrong number of registers
self.assertRaises(ValueError, self.instrument.read_string, 104, 256)
self.assertRaises(ValueError, self.instrument.read_string, 104, 4, 1) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_string, 104, 4, -1)
self.assertRaises(ValueError, self.instrument.read_string, 104, 4, 256)
def testReadStringWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_string, value, 1)
self.assertRaises(TypeError, self.instrument.read_string, value, 4)
self.assertRaises(TypeError, self.instrument.read_string, 104, value)
self.assertRaises(TypeError, self.instrument.read_string, 104, 4, value)
## Write String ##
def testWriteString(self):
self.instrument.write_string(104, 'A', 1)
self.instrument.write_string(104, 'A', 4)
self.instrument.write_string(104, 'ABCDEFGH', 4)
def testWriteStringWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_string, -1, 'A') # Wrong register address
self.assertRaises(ValueError, self.instrument.write_string, 65536, 'A')
self.assertRaises(ValueError, self.instrument.write_string, 104, 'AAA', 1) # Too long string
self.assertRaises(ValueError, self.instrument.write_string, 104, 'ABCDEFGHI', 4)
self.assertRaises(ValueError, self.instrument.write_string, 104, 'A', -1) # Wrong number of registers
self.assertRaises(ValueError, self.instrument.write_string, 104, 'A', 256)
def testWriteStringWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.write_string, value, 'A')
self.assertRaises(TypeError, self.instrument.write_string, 104, 'A', value)
for value in _NOT_STRINGS:
self.assertRaises(TypeError, self.instrument.write_string, 104, value, 4)
## Read Registers ##
def testReadRegisters(self):
self.assertEqual( self.instrument.read_registers(105, 1), [16] )
self.assertEqual( self.instrument.read_registers(105, 3), [16, 32, 64] )
def testReadRegistersWrongValue(self):
self.assertRaises(ValueError, self.instrument.read_registers, -1, 1) # Wrong register address
self.assertRaises(ValueError, self.instrument.read_registers, 65536, 1)
self.assertRaises(ValueError, self.instrument.read_registers, 105, -1) # Wrong number of registers
self.assertRaises(ValueError, self.instrument.read_registers, 105, 256)
self.assertRaises(ValueError, self.instrument.read_registers, 105, 1, 1) # Wrong function code
self.assertRaises(ValueError, self.instrument.read_registers, 105, 1, 256)
self.assertRaises(ValueError, self.instrument.read_registers, 105, 1, -1)
def testReadRegistersWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.read_registers, value, 1)
self.assertRaises(TypeError, self.instrument.read_registers, 105, value)
self.assertRaises(TypeError, self.instrument.read_registers, 105, 1, value)
## Write Registers ##
def testWriteRegisters(self):
self.instrument.write_registers(105, [2])
self.instrument.write_registers(105, [2, 4, 8])
def testWriteRegistersWrongValue(self):
self.assertRaises(ValueError, self.instrument.write_registers, -1, [2]) # Wrong register address
self.assertRaises(ValueError, self.instrument.write_registers, 65536, [2])
self.assertRaises(ValueError, self.instrument.write_registers, 105, []) # Wrong list value
self.assertRaises(ValueError, self.instrument.write_registers, 105, [-1])
def testWriteRegistersWrongType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument.write_registers, value, [2])
for value in _NOT_INTLISTS:
self.assertRaises(TypeError, self.instrument.write_registers, 105, value)
## Generic command ##
def testGenericCommand(self):
# write_bit(71, 1)
self.instrument._genericCommand(5, 71, value=1)
# write_register(35, 20)
self.instrument._genericCommand(16, 35, value=20)
# write_register(45, 88)
self.instrument._genericCommand(6, 45, value=88)
# write_long(102, 5)
self.instrument._genericCommand(16, 102, value=5, numberOfRegisters=2, payloadformat='long')
# write_float(103, 1.1)
self.instrument._genericCommand(16, 103, value=1.1, numberOfRegisters=2, payloadformat='float')
# write_string(104, 'A', 1)
self.instrument._genericCommand(16, 104, value='A', numberOfRegisters=1, payloadformat='string')
# write_registers(105, [2, 4, 8])
self.instrument._genericCommand(16, 105, value=[2, 4, 8], numberOfRegisters=3, payloadformat='registers')
# read_register(289)
self.assertEqual( self.instrument._genericCommand(3, 289), 770)
# read_bit(61)
self.assertEqual( self.instrument._genericCommand(2, 61), 1)
# read_register(101, signed = True)
self.assertEqual( self.instrument._genericCommand(3, 101, signed=True), -5)
# read_register(289, 1)
self.assertAlmostEqual( self.instrument._genericCommand(3, 289, numberOfDecimals=1), 77.0)
# read_long(102)
self.assertEqual( self.instrument._genericCommand(3, 102, numberOfRegisters=2, payloadformat='long'),
4294967295)
# read_float(103)
self.assertAlmostEqual( self.instrument._genericCommand(3, 103, numberOfRegisters=2, payloadformat='float'),
1.0)
# read_string(104, 1)
self.assertEqual( self.instrument._genericCommand(3, 104, numberOfRegisters=1, payloadformat='string'),
'AB')
# read_registers(105, 3)
self.assertEqual( self.instrument._genericCommand(3, 105, numberOfRegisters=3, payloadformat='registers'),
[16, 32, 64])
def testGenericCommandWrongValue(self):
self.assertRaises(ValueError, self.instrument._genericCommand, 35, 289) # Wrong functioncode
self.assertRaises(ValueError, self.instrument._genericCommand, -1, 289)
self.assertRaises(ValueError, self.instrument._genericCommand, 128, 289)
self.assertRaises(ValueError, self.instrument._genericCommand, 3, -1) # Wrong registeraddress
self.assertRaises(ValueError, self.instrument._genericCommand, 3, 65536)
self.assertRaises(ValueError, self.instrument._genericCommand, 3, 289, numberOfDecimals=-1)
self.assertRaises(ValueError, self.instrument._genericCommand, 3, 289, numberOfRegisters=-1)
self.assertRaises(ValueError, self.instrument._genericCommand, 3, 289, payloadformat='ABC')
def testGenericCommandWrongValueCombinations(self):
# Bit
self.assertRaises(ValueError, self.instrument._genericCommand, 5, 71, value=1, numberOfRegisters=2)
# Register
self.assertRaises(TypeError, self.instrument._genericCommand, 6, 45, value='a')
self.assertRaises(ValueError, self.instrument._genericCommand, 6, 45, value=88, numberOfRegisters=2)
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 35, value=20, numberOfRegisters=2)
# Float
self.assertRaises(TypeError, self.instrument._genericCommand, 16, 105, value=[2, 4, 8], numberOfRegisters=2, payloadformat='float')
self.assertRaises(TypeError, self.instrument._genericCommand, 16, 105, value='ABC', numberOfRegisters=2, payloadformat='float')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=None, numberOfRegisters=2, payloadformat='float')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=3.3, numberOfRegisters=2, payloadformat='float', numberOfDecimals=1)
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=3.3, numberOfRegisters=2, payloadformat='float', signed=True)
# String
self.assertRaises(ValueError, self.instrument._genericCommand, 1, 104, value='A', numberOfRegisters=1, payloadformat='string')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 104, value='ABC', numberOfRegisters=1, payloadformat='string')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 104, value=None, numberOfRegisters=1, payloadformat='string')
self.assertRaises(TypeError, self.instrument._genericCommand, 16, 104, value=22, numberOfRegisters=1, payloadformat='string')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 104, value='A', numberOfRegisters=1, payloadformat='string', signed=True)
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 104, value='A', numberOfRegisters=1, payloadformat='string', numberOfDecimals=1)
# Registers
self.assertRaises(TypeError, self.instrument._genericCommand, 16, 105, value=1, numberOfRegisters=1, payloadformat='registers')
self.assertRaises(TypeError, self.instrument._genericCommand, 16, 105, value='A', numberOfRegisters=1, payloadformat='registers')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=[2, 4, 8], numberOfRegisters=1, payloadformat='registers')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=None, numberOfRegisters=3, payloadformat='registers')
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=[2, 4, 8], numberOfRegisters=3, payloadformat='registers', signed=True)
self.assertRaises(ValueError, self.instrument._genericCommand, 16, 105, value=[2, 4, 8], numberOfRegisters=3, payloadformat='registers', numberOfDecimals=1)
def testGenericCommandWrongType(self):
# Note: The parameter 'value' type is dependent on the other parameters. See tests above.
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument._genericCommand, value, 289) # Function code
self.assertRaises(TypeError, self.instrument._genericCommand, 3, value) # Register address
self.assertRaises(TypeError, self.instrument._genericCommand, 3, 289, numberOfDecimals=value)
self.assertRaises(TypeError, self.instrument._genericCommand, 3, 289, numberOfRegisters=value)
for value in _NOT_BOOLEANS:
self.assertRaises(TypeError, self.instrument._genericCommand, 3, 289, signed=value)
for value in _NOT_STRINGS_OR_NONE:
self.assertRaises(ValueError, self.instrument._genericCommand, 3, 289, payloadformat=value)
## Perform command ##
def testPerformcommandKnownResponse(self):
self.assertEqual( self.instrument._performCommand(16, 'TESTCOMMAND'), 'TRsp') # Total response length should be 8 bytes
self.assertEqual( self.instrument._performCommand(75, 'TESTCOMMAND2'), 'TESTCOMMANDRESPONSE2')
self.assertEqual( self.instrument._performCommand(2, '\x00\x3d\x00\x01'), '\x01\x01' ) # Read bit register 61 on slave 1 using function code 2.
def testPerformcommandWrongSlaveResponse(self):
self.assertRaises(ValueError, self.instrument._performCommand, 1, 'TESTCOMMAND') # Wrong slave address in response
self.assertRaises(ValueError, self.instrument._performCommand, 2, 'TESTCOMMAND') # Wrong function code in response
self.assertRaises(ValueError, self.instrument._performCommand, 3, 'TESTCOMMAND') # Wrong crc in response
self.assertRaises(ValueError, self.instrument._performCommand, 4, 'TESTCOMMAND') # Too short response message from slave
self.assertRaises(ValueError, self.instrument._performCommand, 5, 'TESTCOMMAND') # Error indication from slave
def testPerformcommandWrongInputValue(self):
self.assertRaises(ValueError, self.instrument._performCommand, -1, 'TESTCOMMAND') # Wrong function code
self.assertRaises(ValueError, self.instrument._performCommand, 128, 'TESTCOMMAND')
def testPerformcommandWrongInputType(self):
for value in _NOT_INTERGERS:
self.assertRaises(TypeError, self.instrument._performCommand, value, 'TESTCOMMAND')
for value in _NOT_STRINGS:
self.assertRaises(TypeError, self.instrument._performCommand, 16, value)
## Communicate ##
def testCommunicateKnownResponse(self):
self.assertEqual( self.instrument._communicate('TESTMESSAGE', _LARGE_NUMBER_OF_BYTES), 'TESTRESPONSE' )
def testCommunicateWrongType(self):
for value in _NOT_STRINGS:
self.assertRaises(TypeError, self.instrument._communicate, value, _LARGE_NUMBER_OF_BYTES)
def testCommunicateNoMessage(self):
self.assertRaises(ValueError, self.instrument._communicate, '', _LARGE_NUMBER_OF_BYTES)
def testCommunicateNoResponse(self):
self.assertRaises(IOError, self.instrument._communicate, 'MessageForEmptyResponse', _LARGE_NUMBER_OF_BYTES)
def testCommunicateLocalEcho(self):
self.instrument.handle_local_echo = True
self.assertEqual( self.instrument._communicate('TESTMESSAGE2', _LARGE_NUMBER_OF_BYTES), 'TESTRESPONSE2' )
def testCommunicateWrongLocalEcho(self):
self.instrument.handle_local_echo = True
self.assertRaises(IOError, self.instrument._communicate, 'TESTMESSAGE3', _LARGE_NUMBER_OF_BYTES)
## __repr__ ##
def testRepresentation(self):
representation = repr(self.instrument)
self.assertTrue( 'minimalmodbus.Instrument<id=' in representation )
self.assertTrue( ', address=1, mode=rtu, close_port_after_each_call=False, precalculate_read_size=True, debug=False, serial=dummy_serial.Serial<id=' in representation )
self.assertTrue( ", open=True>(port=" in representation )
## Test the dummy serial port itself ##
def testReadPortClosed(self):
self.instrument.serial.close()
self.assertRaises(IOError, self.instrument.serial.read, 1000)
def testWritePortClosed(self):
self.instrument.serial.close()
self.assertRaises(IOError, self.instrument.write_bit, 71, 1)
def testPortAlreadyOpen(self):
self.assertRaises(IOError, self.instrument.serial.open)
def testPortAlreadyClosed(self):
self.instrument.serial.close()
self.assertRaises(IOError, self.instrument.serial.close)
## Tear down test fixture ##
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationOmegaSlave1(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1) # port name, slave address (in decimal)
def testReadBit(self):
self.assertEqual( self.instrument.read_bit(2068), 1 )
def testWriteBit(self):
self.instrument.write_bit(2068, 0)
self.instrument.write_bit(2068, 1)
def testReadRegister(self):
self.assertAlmostEqual( self.instrument.read_register(4097, 1), 823.6 )
def testWriteRegister(self):
self.instrument.write_register(4097, 700.0, 1)
self.instrument.write_register(4097, 823.6, 1)
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationOmegaSlave10(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 10) # port name, slave address (in decimal)
def testReadBit(self):
self.assertEqual( self.instrument.read_bit(2068), 1 )
def testWriteBit(self):
self.instrument.write_bit(2068, 0)
self.instrument.write_bit(2068, 1)
def testReadRegister(self):
self.assertAlmostEqual( self.instrument.read_register(4096, 1), 25.0 )
self.assertAlmostEqual( self.instrument.read_register(4097, 1), 325.8 )
def testWriteRegister(self):
self.instrument.write_register(4097, 325.8, 1)
self.instrument.write_register(4097, 20.0, 1)
self.instrument.write_register(4097, 200.0, 1)
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationDTB4824_RTU(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 7) # port name, slave address (in decimal)
def testReadBit(self):
self.assertEqual( self.instrument.read_bit(0x0800), 0) # LED AT
self.assertEqual( self.instrument.read_bit(0x0801), 0) # LED Out1
self.assertEqual( self.instrument.read_bit(0x0802), 0) # LED Out2
self.assertEqual( self.instrument.read_bit(0x0814), 0) # RUN/STOP
def testWriteBit(self):
self.instrument.write_bit(0x0810, 1) # "Communication write in enabled".
self.instrument.write_bit(0x0814, 0) # STOP
self.instrument.write_bit(0x0814, 1) # RUN
def testReadBits(self):
self.assertEqual( self.instrument._performCommand(2, '\x08\x10\x00\x09'), '\x02\x07\x00')
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(0x1000), 64990) # Process value (PV)
self.assertAlmostEqual( self.instrument.read_register(0x1001, 1), 80.0 ) # Setpoint (SV).
self.assertEqual( self.instrument.read_register(0x1004), 14) # Sensor type.
self.assertEqual( self.instrument.read_register(0x1005), 1) # Control method
self.assertEqual( self.instrument.read_register(0x1006), 0) # Heating/cooling selection.
self.assertAlmostEqual( self.instrument.read_register(0x1012, 1), 0.0 ) # Output 1
self.assertAlmostEqual( self.instrument.read_register(0x1013, 1), 0.0 ) # Output 2
self.assertEqual( self.instrument.read_register(0x1023), 0) # System alarm setting
self.assertEqual( self.instrument.read_register(0x102A), 0) # LED status
self.assertEqual( self.instrument.read_register(0x102B), 15) # Pushbutton status
self.assertEqual( self.instrument.read_register(0x102F), 400) # Firmware version
def testReadRegisters(self):
self.assertEqual( self.instrument.read_registers(0x1000, 2), [64990, 350]) # Process value (PV) and setpoint (SV).
def testWriteRegister(self):
self.instrument.write_register(0x1001, 0x0320, functioncode=6) # Setpoint of 80.0 degrees
self.instrument.write_register(0x1001, 25, 1, functioncode=6) # Setpoint
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationDTB4824_ASCII(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = ASCII_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 7, 'ascii') # port name, slave address (in decimal), mode
def testReadBit(self):
self.assertEqual( self.instrument.read_bit(0x0800), 0) # LED AT
self.assertEqual( self.instrument.read_bit(0x0801), 1) # LED Out1
self.assertEqual( self.instrument.read_bit(0x0802), 0) # LED Out2
self.assertEqual( self.instrument.read_bit(0x0814), 1) # RUN/STOP
def testWriteBit(self):
self.instrument.write_bit(0x0810, 1) # "Communication write in enabled".
self.instrument.write_bit(0x0814, 0) # STOP
self.instrument.write_bit(0x0814, 1) # RUN
def testReadBits(self):
self.assertEqual( self.instrument._performCommand(2, '\x08\x10\x00\x09'), '\x02\x17\x00')
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(0x1000), 64990) # Process value (PV)
self.assertAlmostEqual( self.instrument.read_register(0x1001, 1), 80.0 ) # Setpoint (SV).
self.assertEqual( self.instrument.read_register(0x1004), 14) # Sensor type.
self.assertEqual( self.instrument.read_register(0x1005), 1) # Control method
self.assertEqual( self.instrument.read_register(0x1006), 0) # Heating/cooling selection.
self.assertAlmostEqual( self.instrument.read_register(0x1012, 1), 100.0 ) # Output 1
self.assertAlmostEqual( self.instrument.read_register(0x1013, 1), 0.0 ) # Output 2
self.assertEqual( self.instrument.read_register(0x1023), 0) # System alarm setting
self.assertEqual( self.instrument.read_register(0x102A), 64) # LED status
self.assertEqual( self.instrument.read_register(0x102B), 15) # Pushbutton status
self.assertEqual( self.instrument.read_register(0x102F), 400) # Firmware version
def testReadRegisters(self):
self.assertEqual( self.instrument.read_registers(0x1000, 2), [64990, 350]) # Process value (PV) and setpoint (SV).
def testWriteRegister(self):
self.instrument.write_register(0x1001, 0x0320, functioncode=6) # Setpoint of 80.0 degrees
self.instrument.write_register(0x1001, 25, 1, functioncode=6) # Setpoint
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationWithPortClosure(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
        minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True # Mimic a Windows XP serial port
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1) # port name, slave address (in decimal)
def testReadRegisterSeveralTimes(self):
self.assertEqual( self.instrument.read_register(289), 770 )
self.assertEqual( self.instrument.read_register(289), 770 )
self.assertEqual( self.instrument.read_register(289), 770 )
def testPortAlreadyOpen(self):
self.assertEqual( self.instrument.read_register(289), 770 )
self.instrument.serial.open()
self.assertRaises(IOError, self.instrument.read_register, 289)
def testPortAlreadyClosed(self):
self.assertEqual( self.instrument.read_register(289), 770 )
self.assertRaises(IOError, self.instrument.serial.close)
def tearDown(self):
try:
self.instrument.serial.close()
        except Exception:
pass
self.instrument = None
del(self.instrument)
class TestVerboseDummyCommunicationWithPortClosure(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = True
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
        minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True # Mimic a Windows XP serial port
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1) # port name, slave address (in decimal)
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(289), 770 )
def tearDown(self):
try:
self.instrument.serial.close()
        except Exception:
pass
self.instrument = None
del(self.instrument)
class TestDummyCommunicationDebugmode(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = False
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 1) # port name, slave address (in decimal)
self.instrument.debug = True
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(289), 770 )
def tearDown(self):
self.instrument = None
del(self.instrument)
class TestDummyCommunicationHandleLocalEcho(ExtendedTestCase):
def setUp(self):
dummy_serial.VERBOSE = True
dummy_serial.RESPONSES = RTU_RESPONSES
dummy_serial.DEFAULT_RESPONSE = 'NotFoundInResponseDictionary'
dummy_serial.DEFAULT_TIMEOUT = 0.01
minimalmodbus.serial.Serial = dummy_serial.Serial
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = False
self.instrument = minimalmodbus.Instrument('DUMMYPORTNAME', 20) # port name, slave address (in decimal)
self.instrument.debug = True
self.instrument.handle_local_echo = True
def testReadRegister(self):
self.assertEqual( self.instrument.read_register(289), 770 )
def testReadRegisterWrongEcho(self):
self.assertRaises(IOError, self.instrument.read_register, 290)
def tearDown(self):
self.instrument = None
del(self.instrument)
RTU_RESPONSES = {}
GOOD_RTU_RESPONSES = {}
WRONG_RTU_RESPONSES = {}
ASCII_RESPONSES = {}
GOOD_ASCII_RESPONSES = {}
WRONG_ASCII_RESPONSES = {}
"""A dictionary of respones from a dummy instrument.
The key is the message (string) sent to the serial port, and the item is the response (string)
from the dummy serial port.
"""
# Note that the string 'AAAAAAA' might be easier to read if grouped,
# like 'AA' + 'AAAA' + 'A' for the initial part (address etc) + payload + CRC.
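# A hedged sanity check: every RTU key below is (slave address + function
# code + payload) with the CRC appended, so the CRC part can be regenerated
# with the internal helper that is also used at the bottom of this file
# (assuming its signature is unchanged):
#
#     message = '\x01\x02' + '\x00\x3d\x00\x01'   # Slave 1, FC 2, register 61
#     assert minimalmodbus._calculateCrcString(message) == '(\x06'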
# ## READ BIT ##
# Read bit register 61 on slave 1 using function code 2. Also for testing _performCommand() #
# ----------------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 2. Register address 61, 1 coil. CRC.
# Response: Slave address 1, function code 2. 1 byte, value=1. CRC.
GOOD_RTU_RESPONSES['\x01\x02' + '\x00\x3d\x00\x01' + '(\x06'] = '\x01\x02' + '\x01\x01' + '`H'
# Read bit register 62 on slave 1 using function code 1 #
# ----------------------------------------------------- #
# Message: Slave address 1, function code 1. Register address 62, 1 coil. CRC.
# Response: Slave address 1, function code 1. 1 byte, value=0. CRC.
GOOD_RTU_RESPONSES['\x01\x01' + '\x00\x3e\x00\x01' + '\x9c\x06'] = '\x01\x01' + '\x01\x00' + 'Q\x88'
# Read bit register 63 on slave 1 using function code 2, slave gives wrong byte count #
# ----------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 2. Register address 63, 1 coil. CRC.
# Response: Slave address 1, function code 2. 2 bytes (wrong), value=1. CRC.
WRONG_RTU_RESPONSES['\x01\x02' + '\x00\x3f\x00\x01' + '\x89\xc6'] = '\x01\x02' + '\x02\x01' + '`\xb8'
# Read bit register 64 on slave 1 using function code 2, slave gives no response #
# ------------------------------------------------------------------------------ #
# Message: Slave address 1, function code 2. Register address 64, 1 coil. CRC.
# Response: (empty string)
WRONG_RTU_RESPONSES['\x01\x02' + '\x00\x40\x00\x01' + '\xb8\x1e'] = ''
# ## WRITE BIT ##
# Write bit register 71 on slave 1 using function code 5 #
# ------------------------------------------------------ #
# Message: Slave address 1, function code 5. Register address 71, value 1 (FF00). CRC.
# Response: Slave address 1, function code 5. Register address 71, value 1 (FF00). CRC.
GOOD_RTU_RESPONSES['\x01\x05' + '\x00\x47\xff\x00' + '</'] = '\x01\x05' + '\x00\x47\xff\x00' + '</'
# Write bit register 72 on slave 1 using function code 15 #
# ------------------------------------------------------ #
# Message: Slave address 1, function code 15. Register address 72, 1 bit, 1 byte, value 1 (0100). CRC.
# Response: Slave address 1, function code 15. Register address 72, 1 bit. CRC.
GOOD_RTU_RESPONSES['\x01\x0f' + '\x00\x48\x00\x01\x01\x01' + '\x0fY'] = '\x01\x0f' + '\x00\x48\x00\x01' + '\x14\x1d'
# Write bit register 73 on slave 1 using function code 15, slave gives wrong number of registers #
# ---------------------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 15. Register address 73, 1 bit, 1 byte, value 1 (0100). CRC.
# Response: Slave address 1, function code 15. Register address 73, 2 bits (wrong). CRC.
WRONG_RTU_RESPONSES['\x01\x0f' + '\x00\x49\x00\x01\x01\x01' + '2\x99'] = '\x01\x0f' + '\x00\x49\x00\x02' + '\x05\xdc'
# Write bit register 74 on slave 1 using function code 5, slave gives wrong write data #
# ------------------------------------------------------------------------------------ #
# Message: Slave address 1, function code 5. Register address 74, value 1 (FF00). CRC.
# Response: Slave address 1, function code 5. Register address 71 (wrong), value 0 (0000, wrong). CRC.
WRONG_RTU_RESPONSES['\x01\x05' + '\x00\x4a\xff\x00' + '\xad\xec'] = '\x01\x05' + '\x00\x47\x00\x00' + '}\xdf'
# ## READ REGISTER ##
# Read register 289 on slave 1 using function code 3 #
# ---------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 289, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value=770. CRC=14709.
GOOD_RTU_RESPONSES['\x01\x03' + '\x01!\x00\x01' + '\xd5\xfc'] = '\x01\x03' + '\x02\x03\x02' + '\x39\x75'
# Read register 5 on slave 1 using function code 3 #
# ---------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 5, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value=184. CRC
GOOD_RTU_RESPONSES['\x01\x03' + '\x00\x05\x00\x01' + '\x94\x0b'] = '\x01\x03' + '\x02\x00\xb8' + '\xb86'
# Read register 14 on slave 1 using function code 4 #
# --------------------------------------------------#
# Message: Slave address 1, function code 4. Register address 14, 1 register. CRC.
# Response: Slave address 1, function code 4. 2 bytes, value=880. CRC.
GOOD_RTU_RESPONSES['\x01\x04' + '\x00\x0e\x00\x01' + 'P\t'] = '\x01\x04' + '\x02\x03\x70' + '\xb8$'
# Read register 101 on slave 1 using function code 3 #
# ---------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 101, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value=-5 or 65531 (depending on interpretation). CRC
GOOD_RTU_RESPONSES['\x01\x03' + '\x00e\x00\x01' + '\x94\x15'] = '\x01\x03' + '\x02\xff\xfb' + '\xb87'
# Read register 201 on slave 1 using function code 3 #
# ---------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 201, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value=9. CRC
GOOD_RTU_RESPONSES['\x01\x03' + '\x00\xc9\x00\x01' + 'T4'] = '\x01\x03' + '\x02\x00\x09' + 'xB'
# Read register 202 on slave 1 using function code 3. Too long response #
# ----------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 202, 1 register. CRC.
# Response: Slave address 1, function code 3. Byte count 2, but 3 data bytes (wrong!), value=9. CRC
WRONG_RTU_RESPONSES['\x01\x03' + '\x00\xca\x00\x01' + '\xa44'] = '\x01\x03' + '\x02\x00\x00\x09' + '\x84t'
# Read register 203 on slave 1 using function code 3. Too short response #
# ----------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 203, 1 register. CRC.
# Response: Slave address 1, function code 3. Byte count 2, but only 1 data byte (wrong!), value=9. CRC
WRONG_RTU_RESPONSES['\x01\x03' + '\x00\xcb\x00\x01' + '\xf5\xf4'] = '\x01\x03' + '\x02\x09' + '0\xbe'
# ## WRITE REGISTER ##
# Write value 50 in register 24 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 24, 1 register, 2 bytes, value=50. CRC.
# Response: Slave address 1, function code 16. Register address 24, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00\x18\x00\x01\x02\x002' + '$]'] = '\x01\x10' + '\x00\x18\x00\x01' + '\x81\xce'
# Write value 20 in register 35 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 35, 1 register, 2 bytes, value=20. CRC.
# Response: Slave address 1, function code 16. Register address 35, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00#\x00\x01' + '\x02\x00\x14' + '\xa1\x0c'] = '\x01\x10' + '\x00#\x00\x01' + '\xf0\x03'
# Write value 88 in register 45 on slave 1 using function code 6 #
# ---------------------------------------------------------------#
# Message: Slave address 1, function code 6. Register address 45, value=88. CRC.
# Response: Slave address 1, function code 6. Register address 45, value=88. CRC.
GOOD_RTU_RESPONSES['\x01\x06' + '\x00\x2d\x00\x58' + '\x189'] = '\x01\x06' + '\x00\x2d\x00\x58' + '\x189'
# Write value 5 in register 101 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 101, 1 register, 2 bytes, value=5. CRC.
# Response: Slave address 1, function code 16. Register address 101, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00e\x00\x01\x02\x00\x05' + 'o\xa6'] = '\x01\x10' + '\x00e\x00\x01' + '\x11\xd6'
# Write value 50 in register 101 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 101, 1 register, 2 bytes, value=50. CRC.
# Response: Slave address 1, function code 16. Register address 101, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00e\x00\x01\x02\x002' + '.p'] = '\x01\x10' + '\x00e\x00\x01' + '\x11\xd6'
# Write value -5 in register 101 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 101, 1 register, 2 bytes, value=-5. CRC.
# Response: Slave address 1, function code 16. Register address 101, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00e\x00\x01\x02\xff\xfb' + '\xaf\xd6'] = '\x01\x10' + '\x00e\x00\x01' + '\x11\xd6'
# Write value -50 in register 101 on slave 1 using function code 16 #
# ----------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 101, 1 register, 2 bytes, value=-50. CRC.
# Response: Slave address 1, function code 16. Register address 101, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00e\x00\x01\x02\xff\xce' + 'o\xc1'] = '\x01\x10' + '\x00e\x00\x01' + '\x11\xd6'
# Write value 99 in register 51 on slave 1 using function code 16, slave gives wrong CRC #
# ---------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 51, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 1, function code 16. Register address 51, 1 register. Wrong CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x33\x00\x01' + '\x02\x00\x63' + '\xe3\xba'] = '\x01\x10' + '\x00\x33\x00\x01' + 'AB'
# Write value 99 in register 52 on slave 1 using function code 16, slave gives wrong number of registers #
# -------------------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 52, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 1, function code 16. Register address 52, 2 registers (wrong). CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x34\x00\x01' + '\x02\x00\x63' + '\xe2\r'] = '\x01\x10' + '\x00\x34\x00\x02' + '\x00\x06'
# Write value 99 in register 53 on slave 1 using function code 16, slave gives wrong register address #
# ----------------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 53, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 1, function code 16. Register address 54 (wrong), 1 register. CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x35\x00\x01' + '\x02\x00\x63' + '\xe3\xdc'] = '\x01\x10' + '\x00\x36\x00\x01' + '\xe1\xc7'
# Write value 99 in register 54 on slave 1 using function code 16, slave gives wrong slave address #
# ------------------------------------------------------------------------------------------------ #
# Message: Slave address 1, function code 16. Register address 54, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 2 (wrong), function code 16. Register address 54, 1 register. CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x36\x00\x01' + '\x02\x00\x63' + '\xe3\xef'] = '\x02\x10' + '\x00\x36\x00\x01' + '\xe1\xf4'
# Write value 99 in register 55 on slave 1 using function code 16, slave gives wrong functioncode #
# ----------------------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 16. Register address 55, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 1, function code 6 (wrong). Register address 55, 1 register. CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x37\x00\x01' + '\x02\x00\x63' + '\xe2>'] = '\x01\x06' + '\x00\x37\x00\x01' + '\xf9\xc4'
# Write value 99 in register 56 on slave 1 using function code 16, slave gives wrong functioncode (indicates an error) #
# -------------------------------------------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 16. Register address 56, 1 register, 2 bytes, value=99. CRC.
# Response: Slave address 1, function code 144 (wrong). Register address 56, 1 register. CRC.
WRONG_RTU_RESPONSES['\x01\x10' + '\x00\x38\x00\x01' + '\x02\x00\x63' + '\xe2\xc1'] = '\x01\x90' + '\x00\x38\x00\x01' + '\x81\xda'
# Write value 99 in register 55 on slave 1 using function code 6, slave gives wrong write data #
# -------------------------------------------------------------------------------------------- #
# Message: Slave address 1, function code 6. Register address 55, value=99. CRC.
# Response: Slave address 1, function code 6. Register address 55, value=98 (wrong). CRC.
WRONG_RTU_RESPONSES['\x01\x06' + '\x00\x37\x00\x63' + 'x-'] = '\x01\x06' + '\x00\x37\x00\x62' + '\xb9\xed'
# ## READ LONG ##
# Read long (2 registers, starting at 102) on slave 1 using function code 3 #
# --------------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 102, 2 registers. CRC.
# Response: Slave address 1, function code 3. 4 bytes, value=-1 or 4294967295 (depending on interpretation). CRC
GOOD_RTU_RESPONSES['\x01\x03' + '\x00f\x00\x02' + '$\x14'] = '\x01\x03' + '\x04\xff\xff\xff\xff' + '\xfb\xa7'
# ## WRITE LONG ##
# Write long (2 registers, starting at 102) on slave 1 using function code 16, with value 5. #
# -------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 102, 2 registers, 4 bytes, value=5. CRC.
# Response: Slave address 1, function code 16. Register address 102, 2 registers. CRC
GOOD_RTU_RESPONSES['\x01\x10' + '\x00f\x00\x02\x04\x00\x00\x00\x05' + '\xb5\xae'] = '\x01\x10' + '\x00f\x00\x02' + '\xa1\xd7'
# Write long (2 registers, starting at 102) on slave 1 using function code 16, with value -5. #
# --------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 102, 2 registers, 4 bytes, value=-5. CRC.
# Response: Slave address 1, function code 16. Register address 102, 2 registers. CRC
GOOD_RTU_RESPONSES['\x01\x10' + '\x00f\x00\x02\x04\xff\xff\xff\xfb' + 'u\xfa'] = '\x01\x10' + '\x00f\x00\x02' + '\xa1\xd7'
# Write long (2 registers, starting at 102) on slave 1 using function code 16, with value 3. #
# -------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 102, 2 registers, 4 bytes, value=3. CRC.
# Response: Slave address 1, function code 16. Register address 102, 2 registers. CRC
GOOD_RTU_RESPONSES['\x01\x10' + '\x00f\x00\x02\x04\x00\x00\x00\x03' + '5\xac'] = '\x01\x10' + '\x00f\x00\x02' + '\xa1\xd7'
# Write long (2 registers, starting at 102) on slave 1 using function code 16, with value -3. #
# --------------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 102, 2 registers, 4 bytes, value=-3. CRC.
# Response: Slave address 1, function code 16. Register address 102, 2 registers. CRC
GOOD_RTU_RESPONSES['\x01\x10' + '\x00f\x00\x02\x04\xff\xff\xff\xfd' + '\xf5\xf8'] = '\x01\x10' + '\x00f\x00\x02' + '\xa1\xd7'
# ## READ FLOAT ##
# Read float from address 103 (2 registers) on slave 1 using function code 3 #
# ---------------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 103, 2 registers. CRC.
# Response: Slave address 1, function code 3. 4 bytes, value=1.0. CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00g\x00\x02' + 'u\xd4'] = '\x01\x03' + '\x04\x3f\x80\x00\x00' + '\xf7\xcf'
# Read float from address 103 (2 registers) on slave 1 using function code 4 #
# ---------------------------------------------------------------------------#
# Message: Slave address 1, function code 4. Register address 103, 2 registers. CRC.
# Response: Slave address 1, function code 4. 4 bytes, value=3.65e30. CRC.
GOOD_RTU_RESPONSES['\x01\x04' + '\x00g\x00\x02' + '\xc0\x14'] = '\x01\x04' + '\x04\x72\x38\x47\x25' + '\x93\x1a'
# Read float from address 103 (4 registers) on slave 1 using function code 3 #
# ---------------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 103, 4 registers. CRC.
# Response: Slave address 1, function code 3. 8 bytes, value=-2.0 CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00g\x00\x04' + '\xf5\xd6'] = '\x01\x03' + '\x08\xc0\x00\x00\x00\x00\x00\x00\x00' + '\x99\x87'
# ## WRITE FLOAT ##
# Write float 1.1 to address 103 (2 registers) on slave 1 using function code 16 #
# -------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 103, 2 registers, 4 bytes, value=1.1 . CRC.
# Response: Slave address 1, function code 16. Register address 103, 2 registers. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00g\x00\x02\x04?\x8c\xcc\xcd' + '\xed\x0b'] = '\x01\x10' + '\x00g\x00\x02' + '\xf0\x17'
# Write float 1.1 to address 103 (4 registers) on slave 1 using function code 16 #
# -------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 103, 4 registers, 8 bytes, value=1.1 . CRC.
# Response: Slave address 1, function code 16. Register address 103, 4 registers. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00g\x00\x04\x08?\xf1\x99\x99\x99\x99\x99\x9a' + 'u\xf7'] = '\x01\x10' + '\x00g\x00\x04' + 'p\x15'
# ## READ STRING ##
# Read string from address 104 (1 register) on slave 1 using function code 3 #
# ---------------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 104, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value = 'AB'. CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00h\x00\x01' + '\x05\xd6'] = '\x01\x03' + '\x02AB' + '\x08%'
# Read string from address 104 (4 registers) on slave 1 using function code 3 #
# ----------------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 104, 4 registers. CRC.
# Response: Slave address 1, function code 3. 8 bytes, value = 'ABCDEFGH'. CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00h\x00\x04' + '\xc5\xd5'] = '\x01\x03' + '\x08ABCDEFGH' + '\x0b\xcc'
# ## WRITE STRING ##
# Write string 'A' to address 104 (1 register) on slave 1 using function code 16 #
# -------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 104, 1 register, 2 bytes, value='A ' . CRC.
# Response: Slave address 1, function code 16. Register address 104, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00h\x00\x01\x02A ' + '\x9f0'] = '\x01\x10' + '\x00h\x00\x01' + '\x80\x15'
# Write string 'A' to address 104 (4 registers) on slave 1 using function code 16 #
# --------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 104, 4 registers, 8 bytes, value='A ' . CRC.
# Response: Slave address 1, function code 16. Register address 104, 4 registers. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00h\x00\x04\x08A ' + '\xa7\xae'] = '\x01\x10' + '\x00h\x00\x04' + '@\x16'
# Write string 'ABCDEFGH' to address 104 (4 registers) on slave 1 using function code 16 #
# ---------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 104, 4 registers, 8 bytes, value='ABCDEFGH' . CRC.
# Response: Slave address 1, function code 16. Register address 104, 4 registers. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00h\x00\x04\x08ABCDEFGH' + 'I>'] = '\x01\x10' + '\x00h\x00\x04' + '@\x16'
# ## READ REGISTERS ##
# Read from address 105 (1 register) on slave 1 using function code 3 #
# --------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 105, 1 register. CRC.
# Response: Slave address 1, function code 3. 2 bytes, value = 16. CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00i\x00\x01' + 'T\x16'] = '\x01\x03' + '\x02\x00\x10' + '\xb9\x88'
# Read from address 105 (3 registers) on slave 1 using function code 3 #
# ---------------------------------------------------------------------#
# Message: Slave address 1, function code 3. Register address 105, 3 registers. CRC.
# Response: Slave address 1, function code 3. 6 bytes, value = 16, 32, 64. CRC.
GOOD_RTU_RESPONSES['\x01\x03' + '\x00i\x00\x03' + '\xd5\xd7'] = '\x01\x03' + '\x06\x00\x10\x00\x20\x00\x40' + '\xe0\x8c'
# ## WRITE REGISTERS ##
# Write value [2] to address 105 (1 register) on slave 1 using function code 16 #
# ------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 105, 1 register, 2 bytes, value=2 . CRC.
# Response: Slave address 1, function code 16. Register address 105, 1 register. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00i\x00\x01\x02\x00\x02' + '.\xa8'] = '\x01\x10' + '\x00i\x00\x01' + '\xd1\xd5'
# Write value [2, 4, 8] to address 105 (3 registers) on slave 1 using function code 16 #
# -------------------------------------------------------------------------------------#
# Message: Slave address 1, function code 16. Register address 105, 3 register, 6 bytes, value=2, 4, 8. CRC.
# Response: Slave address 1, function code 16. Register address 105, 3 registers. CRC.
GOOD_RTU_RESPONSES['\x01\x10' + '\x00i\x00\x03\x06\x00\x02\x00\x04\x00\x08' + '\x0c\xd6'] = '\x01\x10' + '\x00i\x00\x03' + 'P\x14'
# ## OTHER RESPONSES ##
# Retrieve an empty response (for testing the _communicate method) #
# ---------------------------------------------------------------- #
WRONG_RTU_RESPONSES['MessageForEmptyResponse'] = ''
# Retrieve a known response (for testing the _communicate method) #
# ---------------------------------------------------------------- #
WRONG_RTU_RESPONSES['TESTMESSAGE'] = 'TESTRESPONSE'
# Retrieve a known response with local echo (for testing the _communicate method) #
# -------------------------------------------------------------------------------- #
WRONG_RTU_RESPONSES['TESTMESSAGE2'] = 'TESTMESSAGE2TESTRESPONSE2'
# Retrieve a response with wrong local echo (for testing the _communicate method) #
# --------------------------------------------------------------------------------- #
WRONG_RTU_RESPONSES['TESTMESSAGE3'] = 'TESTMeSSAGE3TESTRESPONSE3'
# Retrieve a known response (for testing the _performCommand method) #
# ------------------------------------------------------------------- #
WRONG_RTU_RESPONSES['\x01\x10TESTCOMMAND\x08B'] = '\x01\x10TRspU<' # Response should be 8 bytes
WRONG_RTU_RESPONSES['\x01\x4bTESTCOMMAND2\x18\xc8'] = '\x01\x4bTESTCOMMANDRESPONSE2K\x8c'
WRONG_RTU_RESPONSES['\x01\x01TESTCOMMAND4~'] = '\x02\x01TESTCOMMANDRESPONSEx]' # Wrong slave address in response
WRONG_RTU_RESPONSES['\x01\x02TESTCOMMAND0z'] = '\x01\x03TESTCOMMANDRESPONSE2\x8c' # Wrong function code in response
WRONG_RTU_RESPONSES['\x01\x03TESTCOMMAND\xcd\xb9'] = '\x01\x03TESTCOMMANDRESPONSEab' # Wrong CRC in response
WRONG_RTU_RESPONSES['\x01\x04TESTCOMMAND8r'] = 'A' # Too short response message
WRONG_RTU_RESPONSES['\x01\x05TESTCOMMAND\xc5\xb1'] = '\x01\x85TESTCOMMANDRESPONSE\xa54' # Error indication from slave
# Handle local echo: Read register 289 on slave 20 using function code 3 #
# ---------------------------------------------------------------------- #
# Message: Slave address 20, function code 3. Register address 289, 1 register. CRC.
# Response: Echo. Slave address 20, function code 3. 2 bytes, value=770. CRC.
WRONG_RTU_RESPONSES['\x14\x03' + '\x01!\x00\x01' + '\xd79'] = \
('\x14\x03' + '\x01!\x00\x01' + '\xd79') + '\x14\x03' + '\x02\x03\x02' + '4\xb6'
# Handle local echo: Read register 290 on slave 20 using function code 3. Wrong echo #
# ---------------------------------------------------------------------------------- #
# Message: Slave address 20, function code 3. Register address 290, 1 register. CRC.
# Response: Wrong echo. Slave address 20, function code 3. 2 bytes, value=770. CRC.
WRONG_RTU_RESPONSES['\x14\x03' + '\x01\x22\x00\x01' + '\x27\x39'] = \
('\x14\x03' + '\x01\x22\x00\x02' + '\x27\x39') + '\x14\x03' + '\x02\x03\x02' + '4\xb6'
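# With handle_local_echo=True, minimalmodbus first reads back len(message)
# bytes and verifies that they equal what was just sent. A rough sketch of
# that check (not the library's literal code):
#
#     echo = response[:len(message)]
#     if echo != message:          # e.g. the TESTMESSAGE3 case above
#         raise IOError('Local echo error')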
## Recorded data from OmegaCN7500 ##
####################################
# (Sorted by slave address, register address)
# Slave address 1, read_bit(2068). Response value 1.
GOOD_RTU_RESPONSES['\x01\x02\x08\x14\x00\x01\xfb\xae'] ='\x01\x02\x01\x01`H'
# Slave address 1, write_bit(2068, 0)
GOOD_RTU_RESPONSES['\x01\x05\x08\x14\x00\x00\x8f\xae'] ='\x01\x05\x08\x14\x00\x00\x8f\xae'
# Slave address 1, write_bit(2068, 1)
GOOD_RTU_RESPONSES['\x01\x05\x08\x14\xff\x00\xce^'] ='\x01\x05\x08\x14\xff\x00\xce^'
# Slave address 1, read_register(4097, 1). Response value 823.6.
GOOD_RTU_RESPONSES['\x01\x03\x10\x01\x00\x01\xd1\n'] ='\x01\x03\x02 ,\xa0Y'
# Slave address 1, write_register(4097, 700.0, 1)
GOOD_RTU_RESPONSES['\x01\x10\x10\x01\x00\x01\x02\x1bX\xbdJ'] ='\x01\x10\x10\x01\x00\x01T\xc9'
# Slave address 1, write_register(4097, 823.6, 1)
GOOD_RTU_RESPONSES['\x01\x10\x10\x01\x00\x01\x02 ,\xae]'] ='\x01\x10\x10\x01\x00\x01T\xc9'
# Slave address 10, read_bit(2068). Response value 1.
GOOD_RTU_RESPONSES['\n\x02\x08\x14\x00\x01\xfa\xd5'] = '\n\x02\x01\x01bl'
# Slave address 10, write_bit(2068, 0)
GOOD_RTU_RESPONSES['\n\x05\x08\x14\x00\x00\x8e\xd5'] ='\n\x05\x08\x14\x00\x00\x8e\xd5'
# Slave address 10, write_bit(2068, 1)
GOOD_RTU_RESPONSES['\n\x05\x08\x14\xff\x00\xcf%'] ='\n\x05\x08\x14\xff\x00\xcf%'
# Slave address 10, read_register(4096, 1). Response value 25.0.
GOOD_RTU_RESPONSES['\n\x03\x10\x00\x00\x01\x81\xb1'] ='\n\x03\x02\x00\xfa\x9d\xc6'
# Slave address 10, read_register(4097, 1). Response value 325.8.
GOOD_RTU_RESPONSES['\n\x03\x10\x01\x00\x01\xd0q'] ='\n\x03\x02\x0c\xba\x996'
# Slave address 10, write_register(4097, 325.8, 1)
GOOD_RTU_RESPONSES['\n\x10\x10\x01\x00\x01\x02\x0c\xbaA\xc3'] ='\n\x10\x10\x01\x00\x01U\xb2'
# Slave address 10, write_register(4097, 20.0, 1)
GOOD_RTU_RESPONSES['\n\x10\x10\x01\x00\x01\x02\x00\xc8\xc4\xe6'] ='\n\x10\x10\x01\x00\x01U\xb2'
# Slave address 10, write_register(4097, 200.0, 1)
GOOD_RTU_RESPONSES['\n\x10\x10\x01\x00\x01\x02\x07\xd0\xc6\xdc'] ='\n\x10\x10\x01\x00\x01U\xb2'
## Recorded RTU data from Delta DTB4824 ##
##########################################
# (Sorted by register number)
# Slave address 7, read_bit(0x0800). This is LED AT.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x02\x08\x00\x00\x01\xbb\xcc'] = '\x07\x02\x01\x00\xa1\x00'
# Slave address 7, read_bit(0x0801). This is LED Out1.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x02\x08\x01\x00\x01\xea\x0c'] = '\x07\x02\x01\x00\xa1\x00'
# Slave address 7, read_bit(0x0802). This is LED Out2.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x02\x08\x02\x00\x01\x1a\x0c'] = '\x07\x02\x01\x00\xa1\x00'
# Slave address 7, write_bit(0x0810, 1) This is "Communication write in enabled".
GOOD_RTU_RESPONSES['\x07\x05\x08\x10\xff\x00\x8f\xf9'] = '\x07\x05\x08\x10\xff\x00\x8f\xf9'
# Slave address 7, _performCommand(2, '\x08\x10\x00\x09'). This is reading 9 bits starting at 0x0810.
# Response value '\x02\x07\x00'
GOOD_RTU_RESPONSES['\x07\x02\x08\x10\x00\t\xbb\xcf'] = '\x07\x02\x02\x07\x003\x88'
# Slave address 7, read_bit(0x0814). This is RUN/STOP setting.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x02\x08\x14\x00\x01\xfb\xc8'] = '\x07\x02\x01\x00\xa1\x00'
# Slave address 7, write_bit(0x0814, 0). This is STOP.
GOOD_RTU_RESPONSES['\x07\x05\x08\x14\x00\x00\x8f\xc8'] = '\x07\x05\x08\x14\x00\x00\x8f\xc8'
# Slave address 7, write_bit(0x0814, 1). This is RUN.
GOOD_RTU_RESPONSES['\x07\x05\x08\x14\xff\x00\xce8'] = '\x07\x05\x08\x14\xff\x00\xce8'
# Slave address 7, read_registers(0x1000, 2). This is process value (PV) and setpoint (SV).
# Response value [64990, 350]
GOOD_RTU_RESPONSES['\x07\x03\x10\x00\x00\x02\xc0\xad'] = '\x07\x03\x04\xfd\xde\x01^M\xcd'
# Slave address 7, read_register(0x1000). This is process value (PV).
# Response value 64990
GOOD_RTU_RESPONSES['\x07\x03\x10\x00\x00\x01\x80\xac'] = '\x07\x03\x02\xfd\xde\xf0\x8c'
# Slave address 7, read_register(0x1001, 1). This is setpoint (SV).
# Response value 80.0
GOOD_RTU_RESPONSES['\x07\x03\x10\x01\x00\x01\xd1l'] = '\x07\x03\x02\x03 1l'
# Slave address 7, write_register(0x1001, 25, 1, functioncode=6)
GOOD_RTU_RESPONSES['\x07\x06\x10\x01\x00\xfa\\\xef'] = '\x07\x06\x10\x01\x00\xfa\\\xef'
# Slave address 7, write_register(0x1001, 0x0320, functioncode=6) # Write value 800 to register 0x1001.
# This is a setpoint of 80.0 degrees (Celsius, depending on the setting).
GOOD_RTU_RESPONSES['\x07\x06\x10\x01\x03 \xdd\x84'] = '\x07\x06\x10\x01\x03 \xdd\x84'
# Slave address 7, read_register(0x1004). This is sensor type.
# Response value 14
GOOD_RTU_RESPONSES['\x07\x03\x10\x04\x00\x01\xc1m'] = '\x07\x03\x02\x00\x0e\xb1\x80'
# Slave address 7, read_register(0x1005). This is control method.
# Response value 1
GOOD_RTU_RESPONSES['\x07\x03\x10\x05\x00\x01\x90\xad'] = '\x07\x03\x02\x00\x01\xf1\x84'
# Slave address 7, read_register(0x1006). This is heating/cooling selection.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x03\x10\x06\x00\x01`\xad'] = '\x07\x03\x02\x00\x000D'
# Slave address 7, read_register(0x1012, 1). This is output 1.
# Response value 0.0
GOOD_RTU_RESPONSES['\x07\x03\x10\x12\x00\x01 \xa9'] = '\x07\x03\x02\x00\x000D'
# Slave address 7, read_register(0x1013, 1). This is output 2.
# Response value 0.0
GOOD_RTU_RESPONSES['\x07\x03\x10\x13\x00\x01qi'] = '\x07\x03\x02\x00\x000D'
# Slave address 7, read_register(0x1023). This is system alarm setting.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x03\x10#\x00\x01qf'] = '\x07\x03\x02\x00\x000D'
# Slave address 7, read_register(0x102A). This is LED status.
# Response value 0
GOOD_RTU_RESPONSES['\x07\x03\x10*\x00\x01\xa1d'] = '\x07\x03\x02\x00\x000D'
# Slave address 7, read_register(0x102B). This is pushbutton status.
# Response value 15
GOOD_RTU_RESPONSES['\x07\x03\x10+\x00\x01\xf0\xa4'] = '\x07\x03\x02\x00\x0fp@'
# Slave address 7, read_register(0x102F). This is firmware version.
# Response value 400
GOOD_RTU_RESPONSES['\x07\x03\x10/\x00\x01\xb1e'] = '\x07\x03\x02\x01\x901\xb8'
## Recorded ASCII data from Delta DTB4824 ##
############################################
# (Sorted by register number)
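# ASCII-mode frames are ':' + hex-encoded payload + LRC + CRLF. For example,
# ':070208000001EE\r\n' below decomposes as slave 07, function code 02, start
# address 0800, bit count 0001, and LRC EE (the LRC is the two's complement
# of the byte sum: 0x07+0x02+0x08+0x00+0x00+0x01 = 0x12, and 0x100-0x12 = 0xEE).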
# Slave address 7, read_bit(0x0800). This is LED AT.
# Response value 0
GOOD_ASCII_RESPONSES[':070208000001EE\r\n'] = ':07020100F6\r\n'
# Slave address 7, read_bit(0x0801). This is LED Out1.
# Response value 1
GOOD_ASCII_RESPONSES[':070208010001ED\r\n'] = ':07020101F5\r\n'
# Slave address 7, read_bit(0x0802). This is LED Out2.
# Response value 0
GOOD_ASCII_RESPONSES[':070208020001EC\r\n'] = ':07020100F6\r\n'
# Slave address 7, _performCommand(2, '\x08\x10\x00\x09'). This is reading 9 bits starting at 0x0810.
# Response value '\x02\x17\x00'
GOOD_ASCII_RESPONSES[':070208100009D6\r\n'] = ':0702021700DE\r\n'
# Slave address 7, write_bit(0x0810, 1) This is "Communication write in enabled".
GOOD_ASCII_RESPONSES[':07050810FF00DD\r\n'] = ':07050810FF00DD\r\n'
# Slave address 7, read_bit(0x0814). This is RUN/STOP setting.
# Response value 1
GOOD_ASCII_RESPONSES[':070208140001DA\r\n'] = ':07020101F5\r\n'
# Slave address 7, write_bit(0x0814, 0). This is STOP.
GOOD_ASCII_RESPONSES[':070508140000D8\r\n'] = ':070508140000D8\r\n'
# Slave address 7, write_bit(0x0814, 1). This is RUN.
GOOD_ASCII_RESPONSES[':07050814FF00D9\r\n'] = ':07050814FF00D9\r\n'
# Slave address 7, read_registers(0x1000, 2). This is process value (PV) and setpoint (SV).
# Response value [64990, 350]
GOOD_ASCII_RESPONSES[':070310000002E4\r\n'] = ':070304FDDE015EB8\r\n'
# Slave address 7, read_register(0x1000). This is process value (PV).
# Response value 64990
GOOD_ASCII_RESPONSES[':070310000001E5\r\n'] = ':070302FDDE19\r\n'
# Slave address 7, read_register(0x1001, 1). This is setpoint (SV).
# Response value 80.0
GOOD_ASCII_RESPONSES[':070310010001E4\r\n'] = ':0703020320D1\r\n'
# Slave address 7, write_register(0x1001, 25, 1, functioncode=6)
GOOD_ASCII_RESPONSES[':0706100100FAE8\r\n'] = ':0706100100FAE8\r\n'
# Slave address 7, write_register(0x1001, 0x0320, functioncode=6) # Write value 800 to register 0x1001.
# This is a setpoint of 80.0 degrees (Celsius, depending on the setting).
GOOD_ASCII_RESPONSES[':070610010320BF\r\n'] = ':070610010320BF\r\n'
# Slave address 7, read_register(0x1004). This is sensor type.
# Response value 14
GOOD_ASCII_RESPONSES[':070310040001E1\r\n'] = ':070302000EE6\r\n'
# Slave address 7, read_register(0x1005). This is control method.
# Response value 1
GOOD_ASCII_RESPONSES[':070310050001E0\r\n'] = ':0703020001F3\r\n'
# Slave address 7, read_register(0x1006). This is heating/cooling selection.
# Response value 0
GOOD_ASCII_RESPONSES[':070310060001DF\r\n'] = ':0703020000F4\r\n'
# Slave address 7, read_register(0x1012, 1). This is output 1.
# Response value 100.0
GOOD_ASCII_RESPONSES[':070310120001D3\r\n'] = ':07030203E809\r\n'
# Slave address 7, read_register(0x1013, 1). This is output 2.
# Response value 0.0
GOOD_ASCII_RESPONSES[':070310130001D2\r\n'] = ':0703020000F4\r\n'
# Slave address 7, read_register(0x1023). This is system alarm setting.
# Response value 0
GOOD_ASCII_RESPONSES[':070310230001C2\r\n'] = ':0703020000F4\r\n'
# Slave address 7, read_register(0x102A). This is LED status.
# Response value 64
GOOD_ASCII_RESPONSES[':0703102A0001BB\r\n'] = ':0703020040B4\r\n'
# Slave address 7, read_register(0x102B). This is pushbutton status.
# Response value 15
GOOD_ASCII_RESPONSES[':0703102B0001BA\r\n'] = ':070302000FE5\r\n'
# Slave address 7, read_register(0x102F). This is firmware version.
# Response value 400
GOOD_ASCII_RESPONSES[':0703102F0001B6\r\n'] = ':070302019063\r\n'
#######################
# Group recorded data #
#######################
RTU_RESPONSES.update(WRONG_RTU_RESPONSES)
RTU_RESPONSES.update(GOOD_RTU_RESPONSES)
ASCII_RESPONSES.update(WRONG_ASCII_RESPONSES)
ASCII_RESPONSES.update(GOOD_ASCII_RESPONSES)
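# Note that the GOOD_* entries are applied last, so they take precedence if a
# key ever appears in both the good and the wrong dictionaries.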
#################
# Run the tests #
#################
if __name__ == '__main__':
## Run all tests ##
unittest.main(verbosity=VERBOSITY)
## Run a test class ##
#suite = unittest.TestLoader().loadTestsFromTestCase(TestDummyCommunicationHandleLocalEcho)
#suite = unittest.TestLoader().loadTestsFromTestCase(TestCalculateCrcString)
#suite = unittest.TestLoader().loadTestsFromTestCase(TestHexdecode)
#unittest.TextTestRunner(verbosity=2).run(suite)
## Run a single test ##
#suite = unittest.TestSuite()
#suite.addTest(TestDummyCommunication("testReadLong"))
#suite.addTest(TestDummyCommunication("testCommunicateWrongLocalEcho"))
#unittest.TextTestRunner(verbosity=2).run(suite)
## Run individual commands ##
#print repr(minimalmodbus._calculateCrcString('\x01\x4bTESTCOMMAND2'))
| 640Labs/minimalmodbus | tests/test_minimalmodbus.py | Python | apache-2.0 | 147,947 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
from oslo_log import log as logging
from conveyor import compute
from conveyor import exception
from conveyor import network
from conveyor import volume
from conveyor.common import plan_status
from conveyor.conveyoragentclient.v1 import client as birdiegatewayclient
from conveyor.conveyorheat.api import api as heat
temp_opts = [
]
CONF = cfg.CONF
CONF.register_opts(temp_opts)
LOG = logging.getLogger(__name__)
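# A minimal usage sketch (hypothetical wiring; in practice the clone/migrate
# manager instantiates this driver and passes in its own wait callbacks):
#
#     driver = StackTemplateCloneDriver()
#     driver.start_template_clone(ctxt, 'server_0', template,
#                                 trans_data_wait_fun=wait_for_data_transfer)
#
# Here 'ctxt', 'server_0', 'template' and 'wait_for_data_transfer' are
# placeholders, not names defined in this module.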
class StackTemplateCloneDriver(object):
"""Manages the running instances from creation to destruction."""
# How long to wait in seconds before re-issuing a shutdown
    # signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.cinder_api = volume.API()
self.nova_api = compute.API()
self.neutron_api = network.API()
self.heat_api = heat.API()
def start_template_clone(self, context, resource_name, template,
create_volume_wait_fun=None,
volume_wait_fun=None,
trans_data_wait_fun=None,
create_instance_wait_fun=None,
port_wait_fun=None):
LOG.debug("Clone instance %(i)s starting in template %(t)s driver",
{'i': resource_name, 't': template})
# 1. copy data
result = self._copy_volume_data(
context,
resource_name,
template,
trans_data_wait_fun=trans_data_wait_fun,
port_wait_fun=port_wait_fun)
# 2. check data is transforming finished,
# and refresh clone plan status
plan_id = template.get('plan_id', None)
des_gw_ip = result.get('des_ip', None)
des_port = result.get('des_port', None)
task_ids = result.get('copy_tasks', None)
if task_ids and trans_data_wait_fun:
trans_data_wait_fun(context, des_gw_ip, des_port, task_ids,
plan_status.STATE_MAP, plan_id)
        # 3. detach data port from the new instance
server_id = result.get('server_id', None)
port_id = result.get('port_id', None)
if port_id:
self.nova_api.interface_detach(context, server_id, port_id)
LOG.debug("Clone instances end in template driver")
def start_template_migrate(self, context, resource_name, template,
create_volume_wait_fun=None,
volume_wait_fun=None,
trans_data_wait_fun=None,
create_instance_wait_fun=None,
port_wait_fun=None):
LOG.debug("Migrate instance %(i)s starting in template %(t)s driver",
{'i': resource_name, 't': template})
# 1. copy data
result = self._copy_volume_data(
context, resource_name, template,
trans_data_wait_fun=trans_data_wait_fun,
port_wait_fun=port_wait_fun)
# 2. check data is transforming finished,
# and refresh clone plan status
plan_id = template.get('plan_id', None)
des_gw_ip = result.get('des_ip')
des_port = result.get('des_port')
task_ids = result.get('copy_tasks')
if trans_data_wait_fun:
trans_data_wait_fun(context, des_gw_ip, des_port, task_ids,
plan_status.MIGRATE_STATE_MAP, plan_id)
        # 3. detach data port from the new instance
server_id = result.get('server_id')
port_id = result.get('port_id')
if port_id:
self.nova_api.interface_detach(context, server_id, port_id)
LOG.debug("Migrate instance end in template driver")
def _copy_volume_data(self, context, resource_name, template,
trans_data_wait_fun=None, port_wait_fun=None):
        '''Copy data of the volumes defined in the template.'''
resources = template.get('resources')
instance = resources.get(resource_name)
# 2. get server info
server_id = instance.get('id')
stack_id = template.get('stack_id')
try:
server = self.nova_api.get_server(context, server_id)
except Exception as e:
LOG.error("Query server %(server_id)s error: %(error)s",
{'server_id': server_id, 'error': e})
raise exception.ServerNotFound(server_id=server_id)
# 3. get volumes attached to this server
properties = instance.get('properties')
ext_properties = instance.get('extra_properties')
volumes = properties.get('block_device_mapping_v2')
if not volumes:
LOG.warn("Clone instance warning: instance does not have volume.")
rsp = {'server_id': server_id,
'port_id': None,
'des_ip': None,
'des_port': None,
'copy_tasks': []}
return rsp
bdms = []
for v_volume in volumes:
            # if the volume id is a string or a template parameter, the
            # instance uses an existing volume, so its data is not copied
vol_res_id = v_volume.get('volume_id')
if isinstance(vol_res_id, str) or vol_res_id.get('get_param'):
_msg = "Instance clone warning: volume does not copy data: %s" \
% vol_res_id
LOG.debug(_msg)
continue
vol_res_name = v_volume.get('volume_id').get('get_resource')
sys_clone = ext_properties.get('sys_clone')
boot_index = v_volume.get('boot_index')
            # 3.1 if the system volume is not cloned,
            # don't add it to bdms
if not sys_clone and boot_index in [0, '0']:
continue
volume_ext_properties = \
resources.get(vol_res_name).get('extra_properties')
if not volume_ext_properties.get('copy_data'):
continue
# 3.2 get volume id
volume_id = self._get_resource_id(context, vol_res_name, stack_id)
v_volume['id'] = volume_id
if volume_ext_properties:
v_volume['guest_format'] = \
volume_ext_properties.get('guest_format')
v_volume['mount_point'] = \
volume_ext_properties.get('mount_point')
                # volume device name in the guest system
                vol_sys_dev = volume_ext_properties.get('sys_dev_name')
                # if not None, use it, otherwise use the default name
if vol_sys_dev:
v_volume['device_name'] = vol_sys_dev
bdms.append(v_volume)
if not bdms:
return {}
# 4. create transform data port to new instances
server_az = server.get('OS-EXT-AZ:availability_zone', None)
id = server.get('id', None)
if not server_az:
LOG.error('Can not get the availability_zone of server %s', id)
raise exception.AvailabilityZoneNotFound(server_uuid=id)
migrate_net_map = CONF.migrate_net_map
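        # migrate_net_map is expected to map an availability zone name to the
        # id of the network used for data transfer, e.g. (hypothetical values):
        # {'az01': '0f3c2a-uuid-of-transfer-net'}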
migrate_net_id = migrate_net_map.get(server_az, None)
if migrate_net_id:
# 4.1 call neutron api create port
LOG.debug("Instance template driver attach port to instance start")
net_info = self.nova_api.interface_attach(context, id,
migrate_net_id,
port_id=None,
fixed_ip=None)
interface_attachment = net_info._info
if interface_attachment:
LOG.debug('The interface attachment info is %s ' %
str(interface_attachment))
des_gw_ip = \
interface_attachment.get('fixed_ips')[0].get('ip_address')
port_id = interface_attachment.get('port_id')
else:
LOG.error("Instance template driver attach port failed")
raise exception.NoMigrateNetProvided(server_uuid=id)
else:
retrying = 1
while retrying < 300:
des_gw_ip = self._get_server_ip(context, server_id)
if des_gw_ip:
break
retrying += 1
time.sleep(2)
port_id = None
LOG.debug("Instance template driver attach port end: %s", des_gw_ip)
if not des_gw_ip:
_msg = "New clone or migrate VM data transformer IP is None"
raise exception.V2vException(message=_msg)
des_port = str(CONF.v2vgateway_api_listen_port)
des_gw_url = des_gw_ip + ":" + des_port
        # data transfer protocol (ftp/fillp)
data_trans_protocol = CONF.data_transformer_procotol
data_trans_ports = CONF.trans_ports
trans_port = data_trans_ports[0]
src_gw_url = ext_properties.get('gw_url')
src_urls = src_gw_url.split(':')
if len(src_urls) != 2:
LOG.error("Input source gw url error: %s", src_gw_url)
msg = "Input source gw url error: " + src_gw_url
raise exception.InvalidInput(reason=msg)
        # 5. request the birdiegateway service to clone each volume's data
        # and record every volume data copy task id
task_ids = []
for bdm in bdms:
            # 5.1 query the cloned VM's volume device name
            # (e.g. src_dev_name = "/dev/sdc")
src_dev_name = bdm.get('device_name')
client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
des_port)
des_dev_name = \
client.vservices.get_disk_name(bdm.get('id')).get('dev_name')
if not des_dev_name:
des_dev_name = src_dev_name
src_dev_format = bdm.get('guest_format')
            # if the template does not have disk format and mount point info,
            # query them from the conveyor-agent
if not src_dev_format:
client = \
birdiegatewayclient.get_birdiegateway_client(src_urls[0],
src_urls[1])
d_format = client.vservices.get_disk_format(src_dev_name)
src_dev_format = d_format.get('disk_format')
            # if the volume has no filesystem format, there is no data to transfer
if not src_dev_format and CONF.data_transformer_procotol == 'ftp':
continue
src_mount_point = bdm.get('mount_point')
if not src_mount_point:
client = \
birdiegatewayclient.get_birdiegateway_client(src_urls[0],
src_urls[1])
m_info = client.vservices.get_disk_mount_point(src_dev_name)
src_mount_point = m_info.get('mount_point')
if not src_mount_point and CONF.data_transformer_procotol == 'ftp':
continue
mount_point = []
mount_point.append(src_mount_point)
LOG.debug('Volume %(dev_name)s disk format is %(disk_format)s'
' and mount point is %(point)s',
{'dev_name': src_dev_name,
'disk_format': src_dev_format,
'point': src_mount_point})
# get conveyor gateway client to call birdiegateway api
LOG.debug("Instance template driver transform data start")
client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
des_port)
clone_rsp = client.vservices.clone_volume(
src_dev_name,
des_dev_name,
src_dev_format,
mount_point,
src_gw_url,
des_gw_url,
trans_protocol=data_trans_protocol,
trans_port=trans_port)
task_id = clone_rsp.get('body').get('task_id')
if not task_id:
LOG.warn("Clone volume %(dev_name)s response is %(rsp)s",
{'dev_name': des_dev_name, 'rsp': clone_rsp})
continue
task_ids.append(task_id)
rsp = {'server_id': server_id,
'port_id': port_id,
'des_ip': des_gw_ip,
'des_port': des_port,
'copy_tasks': task_ids}
LOG.debug("Instance template driver transform data end")
return rsp
def _get_server_ip(self, context, server_id):
interfaces = self.neutron_api.port_list(context,
device_id=server_id)
host_ip = None
for infa in interfaces:
if host_ip:
break
binding_profile = infa.get("binding:profile", [])
if binding_profile:
host_ip = binding_profile.get('host_ip')
return host_ip
def _get_resource_id(self, context, resource_name, stack_id):
try:
LOG.debug("Query stack %(stack)s resource %(name)s id start",
{'stack': stack_id, 'name': resource_name})
heat_resource = self.heat_api.get_resource(context, stack_id,
resource_name)
resource_id = heat_resource.physical_resource_id
LOG.debug("Query stack %(s)s resource %(n)s id end, id is %(id)s",
{'s': stack_id, 'n': resource_name, 'id': resource_id})
return resource_id
        except Exception as e:
LOG.error("Query stack %(s)s resource %(n)s id error: %(error)s",
{'s': stack_id, 'n': resource_name, 'error': e})
return None
| Hybrid-Cloud/conveyor | conveyor/clone/resources/instance/driver/stack_template.py | Python | apache-2.0 | 15,444 |
"""
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bzh(=2afj5mn*4r#tv)%%!l9w_t88o-+wg=8el79%#@p$0eh-r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../../database/db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../../static') | freddyiniguez/cimat_scrum_developer | superlists/superlists/settings.py | Python | gpl-2.0 | 2,729 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Server.description'
db.add_column('mod_server', 'description',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
# Adding field 'Server.description_html'
db.add_column('mod_server', 'description_html',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Server.description'
db.delete_column('mod_server', 'description')
# Deleting field 'Server.description_html'
db.delete_column('mod_server', 'description_html')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'mod.map': {
'Meta': {'ordering': "['name']", 'object_name': 'Map'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'maps'", 'to': "orm['mod.Server']"})
},
'mod.mod': {
'Meta': {'ordering': "['upload_date', 'title']", 'object_name': 'Mod'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'mod_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'mod.option': {
'Meta': {'ordering': "['id']", 'object_name': 'Option'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'config_options'", 'to': "orm['mod.Server']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'widget': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'mod.port': {
'Meta': {'ordering': "['port']", 'object_name': 'Port'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
'mod.server': {
'Meta': {'ordering': "['owner', 'mod', 'port']", 'object_name': 'Server'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mod': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': "orm['mod.Mod']"}),
'online': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': "orm['auth.User']"}),
'pid': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'server'", 'unique': 'True', 'null': 'True', 'to': "orm['mod.Port']"})
},
'mod.tune': {
'Meta': {'ordering': "['id']", 'object_name': 'Tune'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'config_tunes'", 'to': "orm['mod.Server']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'mod.vote': {
'Meta': {'ordering': "['id']", 'object_name': 'Vote'},
'command': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'config_votes'", 'to': "orm['mod.Server']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['mod'] | upTee/upTee | uptee/mod/south_migrations/0007_auto__add_field_server_description__add_field_server_description_html.py | Python | bsd-3-clause | 8,541 |
#coding:utf-8
from py_rf.decision_tree import DecisionTree
import requests
import random
import sys
import operator
def test_create_decision_tree():
tree = DecisionTree()
def test_predict():
dataset = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data').text.split('\n')
dataset = filter(lambda data: len(data) > 1, dataset)
dataset = map(lambda data: data.split(','), dataset)
split = 2*len(dataset)/3
trial_count = 50
correct_ratio = 0
for _ in xrange(trial_count):
random.shuffle(dataset)
train_data = dataset[:split]
test_data = dataset[split:]
features = map(lambda data: map(float, data[:-1]), train_data)
labels = map(lambda data: data[-1], train_data)
tree = DecisionTree(max_depth=5)
tree.build(features, labels)
correct = 0
for data in test_data:
feature = map(float, data[:-1])
label = data[-1]
probability = tree.predict(feature)
maxlabel = max(probability.iteritems(), key=operator.itemgetter(1))[0]
correct += 1.0 if label == maxlabel else 0.0
correct_ratio += 100.0 * correct / len(test_data)
correct_ratio /= trial_count
print correct_ratio
    assert correct_ratio >= 80.0, "sometimes fails."
| nel215/py-random-forest | tests/test_decision_tree.py | Python | mit | 1,334 |
"""
.. automodule:: pylayers.mobility.agent
:members:
Agent Class
==================
.. autoclass:: Agent
:members:
"""
import sys
import time
import pdb
if sys.version_info.major==2:
import ConfigParser
from SimPy.SimulationRT import Simulation
else:
import configparser as ConfigParser
import simpy
from pylayers.mobility.transit.Person import Person
from pylayers.mobility.transit.World import world
from pylayers.mobility.transit.SteeringBehavior import Seek, Separation, Containment, InterpenetrationConstraint, queue_steering_mind
import numpy as np
import networkx as nx
import pandas as pd
import pylayers.util.pyutil as pyu
from pylayers.network.network import Node, Network
from pylayers.network.communication import Gcom, TX, RX
from pylayers.location.localization import Localization, PLocalization
from pylayers.gis.layout import Layout
from pylayers.util.utilnet import *
#from pylayers.util.pymysqldb import Database
""""
.. currentmodule:: pylayers.mobility.agent
.. autosummary::
:toctree: generated/
"""
class Agent(object):
""" Class Agent
Members
-------
args
ID
name
typ
net
epwr
gcom
sim
wstd
sens
dcond
meca : transit.Person
net : pylayers.network.Network
sim :
PN :
rxt
rxr
"""
def __init__(self, **args):
""" Mobile Agent Init
Parameters
----------
'ID': string
agent ID
'name': string
Agent name
'typ': string
            agent typ: 'ag' for moving agent, 'ap' for static access point
'pos' : np.array([])
numpy array containing the initial position of the agent
'roomId': int
Room number where the agent is initialized (Layout.Gr)
'meca_updt': float
update time interval for the mechanical process
'loc': bool
enable/disable localization process of the agent
'loc_updt': float
update time interval for localization process
'L': pylayers.gis.Layout()
'net':pylayers.network.Network(),
'wstd': list of string
            list of radio access technologies used by the agent
'world': transit.world()
Soon deprecated
'save': list of string
            list of save methods (soon deprecated)
'sim':Simpy.SimulationRT.Simulation(),
        'epwr': dictionary
            dictionary of emitted power of transmitter {'wstd#': epwr value}
        'sens': dictionary
            dictionary of sensitivity of receiver {'wstd#': sens value}
        'dcond': dictionary
            Not used yet
'gcom':pylayers.communication.Gcom()
Communication graph
        'comm_mode': string
            communication mode between nodes:
            'autonomous': all TOAs are refreshed regularly
            'synchro': only visible TOAs are refreshed
"""
defaults = {'ID': '0',
'name': 'johndoe',
'typ': 'ag',
'color': 'k',
'pdshow': False,
'pos': np.array([]),
'roomId': -1,
'froom': [],
'wait': [],
'seed': 0,
'cdest': 'random',
'meca_updt': 0.1,
'loc': False,
'loc_updt': 0.5,
'loc_method': ['geo'],
'L': Layout(),
'network': True,
'net': Network(),
'wstd': ['rat1'],
'world': world(),
'save': [],
'sim': simpy.Environment(),
'epwr': {},
'sens': {},
'dcond': {},
'gcom': Gcom(),
'comm_mode': 'autonomous'}
for key, value in defaults.items():
if key not in args:
args[key] = value
self.args = args
self.ID = args['ID']
self.name = args['name']
self.typ = args['typ']
# Create Network
self.net = args['net']
self.epwr = args['epwr']
self.gcom = args['gcom']
self.sim = args['sim']
self.wstd = args['wstd']
if args['epwr'] == {}:
self.epwr = {x: 0 for x in self.wstd}
else:
self.epwr = args['epwr']
if args['sens'] == {}:
self.sens = {x: -180 for x in self.wstd}
else:
self.sens = args['sens']
try:
self.dcond = args['dcond']
except:
pass
# check if node id already given
if self.ID in self.net.nodes():
raise NameError(
                'another agent has the ID: ' + self.ID + '. Please use another ID')
if self.typ == 'ag':
# mechanical init
self.meca = Person(ID=self.ID,
color=args['color'],
pdshow=args['pdshow'],
roomId=args['roomId'],
L=args['L'],
net=self.net,
interval=args['meca_updt'],
wld=args['world'],
sim=args['sim'],
seed=args['seed'],
moving=True,
froom=args['froom'],
wait=args['wait'],
cdest=args['cdest'],
save=args['save']
)
self.meca.behaviors = [Seek(), Containment(),
Separation(), InterpenetrationConstraint()]
self.meca.steering_mind = queue_steering_mind
# Network init
self.node = Node(ID=self.ID,name=self.name, p=conv_vecarr(self.meca.position),
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim.activate(self.meca, self.meca.move(), 0.0)
self.PN = self.net.node[self.ID]['PN']
# Communication init
if args['comm_mode'] == 'synchro' and args['network']:
                # The TOA requests are made every refreshTOA time (can be modified in agent.ini).
                # This mode will be deprecated in a future version.
self.rxr = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.rxt = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.rxt, self.rxt.refresh_TOA(), 0.0)
elif args['comm_mode'] == 'autonomous' and args['network']:
                # Requests are made by a node only when peers are in
                # visibility.
                # self.rxr only manages the RSS refresh process
self.rxr = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
                # self.tx manages all requests to other nodes
self.tx = TX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
                # self.rx replies to requests from other nodes
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.tx, self.tx.request(), 0.0)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
elif self.typ == 'ap':
if args['roomId'] == -1:
self.node = Node(ID=self.ID, p=self.args['pos'],
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
else:
pp = np.array(args['L'].Gr.pos[self.args['roomId']])
self.node = Node(
ID=self.ID, p=pp, t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim = args['sim']
self.PN = self.net.node[self.ID]['PN']
self.PN.node[self.ID]['pe'] = self.net.node[self.ID]['p']
if args['comm_mode'] == 'autonomous' and args['network']:
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
p = self.args['pos']
self.posdf = pd.DataFrame(
{'t': pd.Timestamp(0), 'x': p[0], 'y': p[1], 'z': p[2],
'vx': np.array([0.0]), 'vy': np.array([0.0]),
'ax': np.array([0.0]), 'ay': np.array([0.0]),
}, columns=['t', 'x', 'y', 'z', 'vx', 'vy', 'ax', 'ay'], index=np.array([0]))
else:
raise NameError(
                'wrong agent typ, it must be either agent (ag) or access point (ap)')
if self.typ == 'ap':
self.MoA = 1
else:
self.MoA = 0
if 'mysql' in args['save']:
config = ConfigParser.ConfigParser()
config.read(pyu.getlong('simulnet.ini', 'ini'))
sql_opt = dict(config.items('Mysql'))
db = Database(sql_opt['host'], sql_opt['user'],
sql_opt['passwd'], sql_opt['dbname'])
db.writenode(self.ID, self.name, self.MoA)
if 'txt' in args['save']:
pyu.writenode(self)
if self.typ != 'ap' and args['loc']:
self.loc = Localization(net=self.net, ID=self.ID,
method=args['loc_method'])
self.Ploc = PLocalization(loc=self.loc,
loc_updt_time=args['loc_updt'],
tx=self.tx,
sim=args['sim'])
self.sim.activate(self.Ploc, self.Ploc.run(), 1.5)
def __repr__(self):
s = 'General Agent info \n********************\n'
s = s + 'name : ' + self.name + '\n'
s = s + 'ID: ' + self.ID + '\n'
s = s + 'typ: ' + self.typ
s = s + '\n\n More Agent information about:'
        s = s + '\n+ Mechanical => self.meca'
s = s + '\n+ Network => self.net'
        s = s + '\n+ Personal Network => self.PN'
s = s + '\n+ Localization => self.loc\n\n'
try:
s = s + self.PN.__repr__() + '\n\n'
except:
s = s + 'No network simulated'
if self.typ != 'ap':
s = s + self.meca.__repr__() + '\n\n'
try:
s = s + self.loc.__repr__() + '\n\n'
except:
s = s + 'no localization simulated'
return s
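# Illustrative usage sketch (not from the original module; the parameter
# values are hypothetical and assume a prepared Layout and simulation):
#
#   ap = Agent(ID='1', name='ap1', typ='ap', pos=np.array((0., 0., 1.5)),
#              wstd=['rat1'], loc=False)
#   ag = Agent(ID='2', name='john', typ='ag', roomId=0,
#              wstd=['rat1'], loc=True, loc_method=['geo'])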
| pylayers/pylayers | pylayers/mobility/agent.py | Python | mit | 11,637 |
#####################################################################
# string.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""SECS string text variable type."""
import unicodedata
from .base_text import BaseText
class String(BaseText):
"""
Secs type for string data.
:param value: initial value
:type value: string
:param count: number of items this value
:type count: integer
"""
format_code = 0o20
text_code = "A"
preferred_types = [bytes, str]
control_chars = "".join(chr(ch) for ch in range(256) if unicodedata.category(chr(ch))[0] == "C" or ch > 127)
coding = "latin-1"
| bparzella/secsgem | secsgem/secs/variables/string.py | Python | lgpl-2.1 | 1,233 |
#-*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from rest_framework import serializers
class Issue(models.Model):
email = models.CharField(max_length="50", blank=True)
subject = models.CharField(max_length="100")
url = models.CharField(max_length="255")
archive = models.CharField(max_length="255", blank=True)
datetime = models.DateTimeField(auto_now_add=True)
count = models.IntegerField(default=0)
goodcount = models.IntegerField(default=0)
claimusers = models.TextField(default='', blank=True)
def get_absolute_url(self, nolook='nolook'):
return reverse_lazy('show news')
| genonfire/portality | issue/models.py | Python | mit | 710 |
from traitlets import Unicode, List
from .base import BasePlugin
from ..api import MissingEntry, Gradebook
class ExportPlugin(BasePlugin):
"""Base class for export plugins."""
to = Unicode("", help="destination to export to").tag(config=True)
student = List(
[], help="list of students to export").tag(config=True)
assignment = List(
[], help="list of assignments to export").tag(config=True)
def export(self, gradebook: Gradebook) -> None:
"""Export grades to another format.
This method MUST be implemented by subclasses. Users should be able to
pass the ``--to`` flag on the command line, which will set the
``self.to`` variable. By default, this variable will be an empty string,
which allows you to specify whatever default you would like.
Arguments
---------
gradebook:
An instance of the gradebook
"""
raise NotImplementedError
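# Illustrative sketch of a custom exporter (not part of nbgrader): it uses
# only the Gradebook attributes that CsvExportPlugin below also relies on,
# and the JSON destination name is an assumption.
class JsonExportPlugin(ExportPlugin):
    """Example JSON exporter sketch."""
    def export(self, gradebook: Gradebook) -> None:
        import json
        dest = self.to if self.to else "grades.json"
        records = []
        for assignment in gradebook.assignments:
            for student in gradebook.students:
                try:
                    submission = gradebook.find_submission(
                        assignment.name, student.id)
                    score = submission.score
                except MissingEntry:
                    # no submission found: record a score of zero
                    score = 0.0
                records.append({"assignment": assignment.name,
                                "student_id": student.id,
                                "score": score})
        with open(dest, "w") as fh:
            json.dump(records, fh)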
class CsvExportPlugin(ExportPlugin):
"""CSV exporter plugin."""
def export(self, gradebook: Gradebook) -> None:
if self.to == "":
dest = "grades.csv"
else:
dest = self.to
if len(self.student) == 0:
allstudents = []
else:
# make sure studentID(s) are a list of strings
allstudents = [str(item) for item in self.student]
if len(self.assignment) == 0:
allassignments = []
else:
# make sure assignment(s) are a list of strings
allassignments = [str(item) for item in self.assignment]
self.log.info("Exporting grades to %s", dest)
if allassignments:
self.log.info("Exporting only assignments: %s", allassignments)
if allstudents:
self.log.info("Exporting only students: %s", allstudents)
fh = open(dest, "w")
keys = [
"assignment",
"duedate",
"timestamp",
"student_id",
"last_name",
"first_name",
"email",
"raw_score",
"late_submission_penalty",
"score",
"max_score"
]
fh.write(",".join(keys) + "\n")
fmt = ",".join(["{" + x + "}" for x in keys]) + "\n"
# Loop over each assignment in the database
for assignment in gradebook.assignments:
            # skip assignments that were not requested
if allassignments and assignment.name not in allassignments:
continue
# Loop over each student in the database
for student in gradebook.students:
                # skip students that were not requested
if allstudents and student.id not in allstudents:
continue
# Create a dictionary that will store information
# about this student's submitted assignment
score = {}
score['assignment'] = assignment.name
score['duedate'] = assignment.duedate
score['student_id'] = student.id
score['last_name'] = student.last_name
score['first_name'] = student.first_name
score['email'] = student.email
score['max_score'] = assignment.max_score
# Try to find the submission in the database. If it
# doesn't exist, the `MissingEntry` exception will be
# raised, which means the student didn't submit
# anything, so we assign them a score of zero.
try:
submission = gradebook.find_submission(
assignment.name, student.id)
except MissingEntry:
score['timestamp'] = ''
score['raw_score'] = 0.0
score['late_submission_penalty'] = 0.0
score['score'] = 0.0
else:
penalty = submission.late_submission_penalty
score['timestamp'] = submission.timestamp
score['raw_score'] = submission.score
score['late_submission_penalty'] = penalty
score['score'] = max(0.0, submission.score - penalty)
for key in score:
if score[key] is None:
score[key] = ''
if not isinstance(score[key], str):
score[key] = str(score[key])
fh.write(fmt.format(**score))
fh.close()
| jhamrick/nbgrader | nbgrader/plugins/export.py | Python | bsd-3-clause | 4,603 |
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask.ext.restful import fields, marshal_with, abort
from jormungandr import i_manager
from jormungandr.interfaces.v1.fields import error,\
PbField, NonNullList, NonNullNested,\
feed_publisher, Links, JsonString, place, \
ListLit, beta_endpoint
from jormungandr.timezone import set_request_timezone
from jormungandr.interfaces.v1.errors import ManageError
from jormungandr.utils import date_to_timestamp
from jormungandr.interfaces.parsers import UnsignedInteger
from jormungandr.interfaces.v1.journey_common import JourneyCommon
from jormungandr.interfaces.v1.fields import DateTime
from jormungandr.interfaces.v1.serializer.api import GraphicalIsrochoneSerializer
from jormungandr.interfaces.v1.decorators import get_serializer
graphical_isochrone = {
"geojson": JsonString(),
"max_duration": fields.Integer(),
"min_duration": fields.Integer(),
'from': PbField(place, attribute='origin'),
"to": PbField(place, attribute="destination"),
'requested_date_time': DateTime(),
'min_date_time': DateTime(),
'max_date_time': DateTime()
}
graphical_isochrones = {
"isochrones": NonNullList(NonNullNested(graphical_isochrone), attribute="graphical_isochrones"),
"error": PbField(error, attribute='error'),
"feed_publishers": fields.List(NonNullNested(feed_publisher)),
"links": fields.List(Links()),
"warnings": ListLit([fields.Nested(beta_endpoint)]),
}
class GraphicalIsochrone(JourneyCommon):
def __init__(self):
super(GraphicalIsochrone, self).__init__(output_type_serializer=GraphicalIsrochoneSerializer)
parser_get = self.parsers["get"]
parser_get.add_argument("min_duration", type=UnsignedInteger(), default=0)
parser_get.add_argument("boundary_duration[]", type=UnsignedInteger(), action="append")
@get_serializer(serpy=GraphicalIsrochoneSerializer, marshall=graphical_isochrones)
@ManageError()
def get(self, region=None, uri=None):
args = self.parsers['get'].parse_args()
self.region = i_manager.get_region(region)
args.update(self.parse_args(region, uri))
if not (args['destination'] or args['origin']):
abort(400, message="you should provide a 'from' or a 'to' argument")
if not args['max_duration'] and not args["boundary_duration[]"]:
abort(400, message="you should provide a 'boundary_duration[]' or a 'max_duration' argument")
if args['destination'] and args['origin']:
abort(400, message="you cannot provide a 'from' and a 'to' argument")
set_request_timezone(self.region)
original_datetime = args['original_datetime']
if original_datetime:
new_datetime = self.convert_to_utc(original_datetime)
args['datetime'] = date_to_timestamp(new_datetime)
response = i_manager.dispatch(args, "graphical_isochrones", self.region)
return response
def options(self, **kwargs):
return self.api_description(**kwargs)
| antoine-de/navitia | source/jormungandr/jormungandr/interfaces/v1/GraphicalIsochrone.py | Python | agpl-3.0 | 4,290 |
from __future__ import unicode_literals
import keyword
import re
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.')
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
strip_prefix = lambda s: s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
used_column_names = [] # Holds column names used in the table so far
column_to_field_name = {} # Maps column names to names of model fields
for row in connection.introspection.get_table_description(cursor, table_name):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
column_to_field_name[column_name] = att_name
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "self" if relations[column_name][1] == table_name else table2model(relations[column_name][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name, constraints, column_to_field_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints, column_to_field_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
for index, params in constraints.items():
if params['unique']:
columns = params['columns']
if len(columns) > 1:
# we do not want to include the u"" or u'' prefix
# so we build the string rather than interpolate the tuple
tup = '(' + ', '.join("'%s'" % column_to_field_name[c] for c in columns) + ')'
unique_together.append(tup)
meta = ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django/core/management/commands/inspectdb.py | Python | agpl-3.0 | 12,078 |
#!/usr/bin/env python3
"""Defines ways to "convert" a file name to an input/output stream."""
from __future__ import absolute_import, division, print_function
from builtins import range
from io import TextIOBase
import math
import os
from emLam.utils import allname, openall
class MultiFileWriter(TextIOBase):
def __init__(self, file_name, max_lines, wait_for_empty=True):
self.file_name = file_name
self.max_lines = max_lines
self.wait_for_empty = wait_for_empty
self.index = 1
self.lines = 0
self.f = openall(self.__get_file_name(), 'wt')
def __get_file_name(self, index=None, digits=None):
basename, extension = allname(self.file_name)
ext = extension if extension else ''
num_format = '{{:0{}d}}'.format(digits) if digits else '{}'
index_str = num_format.format(self.index if index is None else index)
return '{}-{}{}'.format(basename, index_str, ext)
def close(self):
self.f.close()
def fileno(self):
return self.f.fileno()
def flush(self):
return self.f.flush()
def write(self, s):
for line in s.splitlines():
self.f.write(line)
self.f.write(u'\n')
self.lines += 1
if self.lines >= self.max_lines and (
not self.wait_for_empty or line == ''):
self.__new_file()
def __new_file(self):
"""
Opens the next file, resets the line counter and renames all previous
files if we need a new digit.
"""
self.f.close()
digits = int(math.log10(self.index)) + 1
self.index += 1
new_digits = int(math.log10(self.index)) + 1
if new_digits > digits:
for i in range(1, self.index):
os.rename(self.__get_file_name(i, digits),
self.__get_file_name(i, new_digits))
self.f = openall(self.__get_file_name(), 'wt')
self.lines = 0
def isatty(self):
return False
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return True
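# Illustrative usage (a sketch; the file name and line budget are
# hypothetical):
#
#   writer = MultiFileWriter('corpus.txt.gz', max_lines=1000000)
#   for paragraph in paragraphs:
#       writer.write(paragraph + '\n\n')  # trailing blank line = unit boundary
#   writer.close()
#
# With wait_for_empty=True (the default), the writer only rolls over to the
# next part after an empty line, keeping blank-line-delimited units within a
# single file. When the part index gains a digit (e.g. reaching part 10),
# earlier parts are renamed (corpus-1... -> corpus-01...) so the names keep
# sorting correctly.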
| DavidNemeskey/emLam | emLam/corpus/multi_file_writer.py | Python | mit | 2,175 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Test verifies that address cache entry associated with a SED child addresses is removed
from new parent node ensuring we would not have a routing loop.
"""
import random
import time
import unittest
import silk.hw.hw_resource as hwr
import silk.node.fifteen_four_dev_board as ffdb
import silk.tests.testcase as testcase
from silk.config import wpan_constants as wpan
from silk.node.wpan_node import WpanCredentials
from silk.tools import wpan_table_parser
from silk.tools.wpan_util import verify, verify_within
from silk.unit_tests.test_utils import random_string
from silk.utils import process_cleanup
hwr.global_instance()
PREFIX = "fd00:1234::"
POLL_INTERVAL = 400
INVALID_ROUTER_ID = 63
CHILD_SUPERVISION_CHECK_TIMEOUT = 2
PARENT_SUPERVISION_INTERVAL = 1
REATTACH_WAIT_TIME = 60
PREFIX_PROPAGATION_DELAY = 60
class TestClearAddressCacheTableForSed(testcase.TestCase):
    # This test verifies that the address cache entry associated with a SED
    # child's addresses is removed from the new parent node, ensuring we would
    # not have a routing loop.
# -----------------------------------------------------------------------
# Build network topology
#
# r3 ---- r1 ---- r2
# | |
# | |
# ed3 sed
#
    # sed is initially attached to r2 but switches parent during the test to r1 and then r3.
    # ed3 is just added to make sure r3 becomes a router quickly (not involved in the test).
r1_address, r2_address, sed_address = None, None, None
@classmethod
def hardware_select(cls: 'TestClearAddressCacheTableForSed'):
cls.r1 = ffdb.ThreadDevBoard()
cls.r2 = ffdb.ThreadDevBoard()
cls.r3 = ffdb.ThreadDevBoard()
cls.sed = ffdb.ThreadDevBoard()
cls.ed3 = ffdb.ThreadDevBoard()
cls.all_nodes = [cls.r1, cls.r2, cls.r3, cls.sed, cls.ed3]
@classmethod
@testcase.setup_class_decorator
def setUpClass(cls):
# Check and clean up wpantund process if any left over
process_cleanup.ps_cleanup()
cls.hardware_select()
for device in cls.all_nodes:
device.set_logger(cls.logger)
cls.add_test_device(device)
device.set_up()
cls.network_data = WpanCredentials(network_name="SILK-{0:04X}".format(random.randint(0, 0xffff)),
psk="00112233445566778899aabbccdd{0:04x}".format(random.randint(0, 0xffff)),
channel=random.randint(11, 25),
fabric_id="{0:06x}dead".format(random.randint(0, 0xffffff)))
cls.thread_sniffer_init(cls.network_data.channel)
@classmethod
@testcase.teardown_class_decorator
def tearDownClass(cls: 'TestClearAddressCacheTableForSed'):
for device in cls.device_list:
device.tear_down()
@testcase.setup_decorator
def setUp(self):
pass
@testcase.teardown_decorator
def tearDown(self):
pass
def get_ipv6_address_for_prefix(self):
TestClearAddressCacheTableForSed.r1_address = self.r1.find_ip6_address_with_prefix(PREFIX)
TestClearAddressCacheTableForSed.r2_address = self.r2.find_ip6_address_with_prefix(PREFIX)
TestClearAddressCacheTableForSed.sed_address = self.sed.find_ip6_address_with_prefix(PREFIX)
def transmit_receive_udp_message(self, addresses: list):
timeout = 5
delay = 1
for i, (src, dst, src_address, dst_address) in enumerate(addresses):
port = random.randint(10000 + i * 100, 10099 + i * 100)
message = random_string(10)
dst.receive_udp_data(port, message, timeout)
time.sleep(delay)
src.send_udp_data(dst_address, port, message, src_address)
time.sleep(timeout - delay)
@testcase.test_method_decorator
def test01_pairing(self):
# Create allowlisted nodes based on the topology.
for node1, node2 in [(self.r1, self.r2), (self.r1, self.r3),
(self.r2, self.sed), (self.r3, self.ed3)]:
node1.allowlist_node(node2)
node2.allowlist_node(node1)
self.r1.form(self.network_data, "router")
self.r1.permit_join(3600)
self.wait_for_completion(self.device_list)
self.logger.info(self.r1.ip6_lla)
self.logger.info(self.r1.ip6_thread_ula)
self.network_data.xpanid = self.r1.xpanid
self.network_data.panid = self.r1.panid
for node in [self.r2, self.r3]:
node.join(self.network_data, "router")
self.wait_for_completion(self.device_list)
self.sed.join(self.network_data, "sleepy-end-device")
self.sed.set_sleep_poll_interval(POLL_INTERVAL)
self.wait_for_completion(self.device_list)
self.ed3.join(self.network_data, "end-node")
self.wait_for_completion(self.device_list)
@testcase.test_method_decorator
def test02_verify_node_type_and_parent(self):
self.assertTrue(self.r1.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_LEADER)
self.assertTrue(self.r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
self.assertTrue(self.r3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
self.assertTrue(self.ed3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_END_DEVICE)
self.assertTrue(self.sed.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_SLEEPY_END_DEVICE)
r2_ext_address = self.r2.getprop(wpan.WPAN_EXT_ADDRESS)[1:-1]
sed_parent = self.sed.getprop(wpan.WPAN_THREAD_PARENT)[1:17]
self.assertTrue(sed_parent == r2_ext_address)
r3_ext_address = self.r3.getprop(wpan.WPAN_EXT_ADDRESS)[1:-1]
ed3_parent = self.ed3.getprop(wpan.WPAN_THREAD_PARENT)[1:17]
self.assertTrue(ed3_parent == r3_ext_address)
@testcase.test_method_decorator
def test03_get_ipv6_addresses_for_prefix(self):
# Add prefix on r1
self.r1.add_prefix(PREFIX, stable=True, on_mesh=True, slaac=True, preferred=True)
self.wait_for_completion(self.device_list)
# Wait for prefix to get added to all the nodes
time.sleep(PREFIX_PROPAGATION_DELAY)
self.get_ipv6_address_for_prefix()
@testcase.test_method_decorator
def test04_send_udp_msg_from_r1_to_sed(self):
# Send a single UDP message from r1 to sed
#
# This adds an address cache entry on r1 for sed pointing to r2 (the current parent of sed).
self.transmit_receive_udp_message([(self.r1, self.sed, self.r1_address, self.sed_address)])
@testcase.test_method_decorator
def test05_verify_sed_forced_switch_to_parent_r1(self):
# Force sed to switch its parent from r2 to r1
#
# r3 ---- r1 ---- r2
# | |
# | |
# ed3 sed
self.sed.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
self.r2.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
self.r1.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
self.r3.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
self.r2.un_allowlist_node(self.sed)
self.r1.allowlist_node(self.sed)
self.sed.allowlist_node(self.r1)
# Wait for sed to detach from r2 and attach to r1.
def check_sed_is_removed_from_r2_child_table():
child_table = self.r2.wpanctl("get", "get " + wpan.WPAN_THREAD_CHILD_TABLE, 2)
child_table = wpan_table_parser.parse_child_table_address_result(child_table)
verify(len(child_table) == 0)
verify_within(check_sed_is_removed_from_r2_child_table, REATTACH_WAIT_TIME)
# check that sed is now a child of r1
child_table = self.r1.wpanctl("get", "get " + wpan.WPAN_THREAD_CHILD_TABLE, 2)
child_table = wpan_table_parser.parse_child_table_address_result(child_table)
verify(len(child_table) == 1)
@testcase.test_method_decorator
def test06_send_udp_msg_from_r2_to_sed(self):
# Send a single UDP message from r2 to sed
#
# This adds an address cache entry on r2 for sed pointing to r1 (the current parent of sed).
self.transmit_receive_udp_message([(self.r2, self.sed, self.r2_address, self.sed_address)])
@testcase.test_method_decorator
def test07_verify_sed_forced_switch_to_parent_r3(self):
# Force sed to switch its parent from r1 to r3
#
# r3 ---- r1 ---- r2
# | \
# | \
# ed3 sed
self.r1.un_allowlist_node(self.sed)
self.r3.allowlist_node(self.sed)
self.sed.allowlist_node(self.r3)
# Wait for sed to detach from r1 and attach to r3.
def check_sed_is_removed_from_r1_child_table():
child_table = self.r1.wpanctl("get", "get " + wpan.WPAN_THREAD_CHILD_TABLE, 2)
child_table = wpan_table_parser.parse_child_table_address_result(child_table)
verify(len(child_table) == 0)
verify_within(check_sed_is_removed_from_r1_child_table, REATTACH_WAIT_TIME)
        # check that sed is now a child of r3 (r3 should have two children, sed and ed3)
child_table = self.r3.wpanctl("get", "get " + wpan.WPAN_THREAD_CHILD_TABLE, 2)
child_table = wpan_table_parser.parse_child_table_address_result(child_table)
verify(len(child_table) == 2)
@testcase.test_method_decorator
def test08_verify_r1_address_cache_entry_is_cleared(self):
# Send a single UDP message from r1 to sed
#
# If the r1 address cache entry is not cleared when sed attached to r1,
# r1 will still have an entry pointing to r2, and r2 will have an entry
# pointing to r1, thus creating a loop (the msg will not be delivered to r3)
self.transmit_receive_udp_message([(self.r1, self.sed, self.r1_address, self.sed_address)])
if __name__ == "__main__":
unittest.main()
| openthread/silk | silk/tests/openthread/ot_test_clear_address_cache_for_sed.py | Python | apache-2.0 | 10,633 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default, where supported, restores params default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
- Deprecated in 2.4
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast"]
'''
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
def main():
argument_spec = dict(
vrf=dict(required=True),
afi=dict(required=True, choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
m_facts=dict(default=False, type='bool', removed_in_version="2.4"),
safi=dict(choices=['unicast', 'multicast'], removed_in_version="2.4"),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config_text = get_config(module)
config = NetworkConfig(indent=2, contents=config_text)
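    # Extract the existing address-family block nested under the VRF context;
    # get_block_config() raises ValueError when the path does not exist, which
    # is treated below as "no current config".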
path = ['vrf context %s' % module.params['vrf'],
'address-family %s unicast' % module.params['afi']]
try:
current = config.get_block_config(path)
except ValueError:
current = None
commands = list()
if current and module.params['state'] == 'absent':
commands.append('no address-family %s unicast' % module.params['afi'])
elif module.params['state'] == 'present':
if current:
have = 'route-target both auto evpn' in current
want = bool(module.params['route_target_both_auto_evpn'])
if want and not have:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('route-target both auto evpn')
elif have and not want:
commands.append('address-family %s unicast' % module.params['afi'])
commands.append('no route-target both auto evpn')
else:
commands.append('address-family %s unicast' % module.params['afi'])
if module.params['route_target_both_auto_evpn']:
commands.append('route-target both auto evpn')
if commands:
commands.insert(0, 'vrf context %s' % module.params['vrf'])
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| cryptobanana/ansible | lib/ansible/modules/network/nxos/nxos_vrf_af.py | Python | gpl-3.0 | 4,746 |
import os
import re
from sphinx.util.compat import Directive
HTML_THEME_PATH = [os.path.abspath(os.path.join(os.path.dirname(__file__),
'..'))]
# Storage for SEO descriptions.
seo_descriptions = {}
class SeoDescription(Directive):
"""
    This directive merely saves its contents to the seo_descriptions dict
under the document name key.
"""
# this enables content in the directive
has_content = True
def run(self):
# Save the last SEO description for a page.
seo_descriptions[self.state.document.settings.env.docname] = ' '.join(self.content)
# Must return a list of nodes.
return []
def tt2nav(toctree, klass=None, appendix=None, divider=False):
"""
Injects ``has-dropdown`` and ``dropdown`` classes to HTML
generated by the :func:`toctree` function.
:param str toctree:
HTML generated by the :func:`toctree` function.
"""
tt = toctree
divider = '<li class="divider"></li>' if divider else ''
# Append anything just before the closing </ul>.
if appendix:
tt = re.sub(r'(</ul>$)', r'{}\1'.format(appendix), tt)
# Add class attribute to all <ul> elements.
tt = re.sub(r'<ul>', r'<ul class="">', tt)
# Add class to first <ul> tag.
if klass:
tt = re.sub(r'(^<ul[\s\w-]+class=")', r'\1{} '.format(klass), tt)
# Add class "active" to all <li> tags with "current" class.
# tt = re.sub(r'(<li[\s\w-]+class="[^"]*current)([^"]*")', r'\1 active\2', tt)
# Match each <li> that contains <ul>.
pattern = r'(<li[\s\w-]+class=")([^>]*>[^<]*<a[^>]*>[^<]*</a>[^<]*<ul[\s\w]+class=")'
# Inject the classes.
replace = r'{}\1has-dropdown \2dropdown '.format(divider)
# Do the replace and return.
return re.sub(pattern, replace, tt)
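# Example of the transformation (hypothetical toctree markup): an input like
# '<ul><li class="toctree-l1"><a href="#">A</a><ul><li>...</li></ul></li></ul>'
# first gets class="" on every <ul>, then 'has-dropdown' is injected into the
# parent <li> class and 'dropdown' into the nested <ul> class.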
def hpc(app, pagename, templatename, context, doctree):
# Add the tt2nav() callable to Jinja2 template context.
context['tt2nav'] = tt2nav
context['seo_description'] = seo_descriptions.get(pagename, '')
def setup(app):
# Add directives.
app.add_directive('seo-description', SeoDescription)
# Hook the events.
app.connect('html-page-context', hpc)
| peterhudec/foundation-sphinx-theme | foundation_sphinx_theme/__init__.py | Python | mit | 2,269 |
#!/usr/bin/python
"""
Copyright (C) 2005-2013 MaNGOS <http://getmangos.com/>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import os, sys, threading, time, subprocess
from multiprocessing import cpu_count
from collections import deque
mapList = deque([0,1,530,571,13,25,30,33,34,35,36,37,42,43,44,47,48,70,90,109,129,169,189,209,229,230,249,269,289,309,329,349,369,
389,409,429,449,450,451,469,489,509,529,531,532,533,534,540,542,543,544,545,546,547,548,550,552,553,554,555,556,557,558,559,
560,562,564,565,566,568,572,573,574,575,576,578,580,582,584,585,586,587,588,589,590,591,592,593,594,595,596,597,598,599,600,
601,602,603,604,605,606,607,608,609,610,612,613,614,615,616,617,618,619,620,621,622,623,624,628,631,632,641,642,647,649,650,
658,668,672,673,712,713,718,723,724])
class workerThread(threading.Thread):
def __init__(self, mapID):
threading.Thread.__init__(self)
self.mapID = mapID
def run(self):
name = "Worker for map %u" % (self.mapID)
print "++ %s" % (name)
if sys.platform == 'win32':
stInfo = subprocess.STARTUPINFO()
stInfo.dwFlags |= 0x00000001
stInfo.wShowWindow = 7
cFlags = subprocess.CREATE_NEW_CONSOLE
binName = "MoveMapGen.exe"
else:
stInfo = None
cFlags = 0
binName = "./MoveMapGen"
retcode = subprocess.call([binName, "%u" % (self.mapID),"--silent"], startupinfo=stInfo, creationflags=cFlags)
print "-- %s" % (name)
if __name__ == "__main__":
cpu = cpu_count() - 0 # You can reduce the load by putting 1 instead of 0 if you need to free 1 core/cpu
if cpu < 1:
cpu = 1
print "I will always maintain %u MoveMapGen tasks running in //\n" % (cpu)
while (len(mapList) > 0):
if (threading.active_count() <= cpu):
workerThread(mapList.popleft()).start()
time.sleep(0.1)
| ralph93/crtoe | contrib/mmap/mmap_extract.py | Python | gpl-2.0 | 2,593 |
from twisted.mail.imap4 import IClientAuthentication
from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory
from zope.interface import implements
class XOAUTH2Authenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "XOAUTH2"
def challengeResponse(self, access_token, chal=None):
return 'user=%s\1auth=Bearer %s\1\1' % (self.user, access_token)
class ESMTP_XOAUTH2_Sender(ESMTPSender):
def _registerAuthenticators(self):
self.registerAuthenticator(XOAUTH2Authenticator(self.username))
class ESMTP_XOAUTH2_SenderFactory(ESMTPSenderFactory):
protocol = ESMTP_XOAUTH2_Sender
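if __name__ == '__main__':
    # Usage sketch (illustrative only): host, addresses and token below are
    # hypothetical. The OAuth2 access token is passed in the password slot of
    # ESMTPSenderFactory; challengeResponse() then wraps it as a bearer token.
    from StringIO import StringIO
    from twisted.internet import reactor, defer
    done = defer.Deferred()
    factory = ESMTP_XOAUTH2_SenderFactory(
        '[email protected]', 'ya29.hypothetical-access-token',
        '[email protected]', ['[email protected]'],
        StringIO('Subject: test\r\n\r\nHello.'), done,
        requireAuthentication=True, requireTransportSecurity=True)
    reactor.connectTCP('smtp.gmail.com', 587, factory)
    done.addBoth(lambda _: reactor.stop())
    reactor.run()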
| pytlakp/intranetref | src/intranet3/utils/smtp.py | Python | mit | 755 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
*********************************************
*
* PlotXmuSpectra
* Plot Xmu spectra
* Files must be in Xmu
* version: 20180206b
*
* By: Nicola Ferralis <[email protected]>
*
***********************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, getopt, glob, csv, re
from datetime import datetime, date
import matplotlib.pyplot as plt
def main():
    if len(sys.argv) < 4:
        print(' Usage:\n python3 PlotXmuSpectra.py <EnInit> <EnFin> <EnStep>\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
return
else:
enInit = sys.argv[1]
enFin = sys.argv[2]
enStep = sys.argv[3]
rootPlotFile = "plot_"
dateTimeStamp = str(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
summaryPlotFile = rootPlotFile+"summary_"+dateTimeStamp+".csv"
plotFile = rootPlotFile+dateTimeStamp
plt.figure(num=plotFile)
with open(summaryPlotFile, "a") as sum_file:
sum_file.write('Classification started: '+dateTimeStamp+"\n")
index = 0
for ind, file in enumerate(sorted(os.listdir("."))):
try:
if os.path.splitext(file)[-1] == ".xmu":
with open(file, 'r') as f:
#M = np.loadtxt(f, unpack = True, skiprows = 40)
M = np.loadtxt(f, unpack = True)
En = M[0]
R = M[1]
print(file + '\n File OK, converting to ASCII...')
EnT = np.arange(float(enInit), float(enFin), float(enStep), dtype=np.float)
if EnT.shape[0] == En.shape[0]:
print(' Number of points in the learning dataset: ' + str(EnT.shape[0]))
else:
print('\033[1m' + ' Mismatch in datapoints: ' + str(EnT.shape[0]) + '; sample = ' + str(En.shape[0]) + '\033[0m')
# Interpolate to new axis
R = np.interp(EnT, En, R, left = R[0], right = 0)
# Renormalize offset by min R
R = R - np.amin(R) + 1e-8
# Renormalize to max of R
R = R/np.amax(R)
index += 1
label = re.search('(.+?)_',file).group(1)
with open(summaryPlotFile, "a") as sum_file:
sum_file.write(str(index) + ',,,' + label + ','+file+'\n')
plt.plot(EnT,R,label=label)
except:
print("\n Skipping: ",file)
plt.xlabel('Energy [eV]')
plt.ylabel('Intensity [arb. units]')
plt.legend(loc='upper left')
plt.savefig(plotFile+".png", dpi = 160, format = 'png') # Save plot
plt.show()
plt.close()
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| feranick/SpectralMachine | Utilities/PlotXmuSpectra.py | Python | gpl-3.0 | 2,922 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import mock
import psutil
from pants.java.nailgun_executor import NailgunExecutor
from pants_test.test_base import TestBase
PATCH_OPTS = dict(autospec=True, spec_set=True)
def fake_process(**kwargs):
proc = mock.create_autospec(psutil.Process, spec_set=True)
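  # Each keyword argument stubs the corresponding Process method, so e.g.
  # fake_process(name='java') makes proc.name() return 'java'.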
[setattr(getattr(proc, k), 'return_value', v) for k, v in kwargs.items()]
return proc
class NailgunExecutorTest(TestBase):
def setUp(self):
super(NailgunExecutorTest, self).setUp()
self.executor = NailgunExecutor(identity='test',
workdir='/__non_existent_dir',
nailgun_classpath=[],
distribution=mock.Mock(),
metadata_base_dir=self.subprocess_dir)
def test_is_alive_override(self):
with mock.patch.object(NailgunExecutor, '_as_process', **PATCH_OPTS) as mock_as_process:
mock_as_process.return_value = fake_process(
name='java',
pid=3,
status=psutil.STATUS_IDLE,
cmdline=['java', '-arg', NailgunExecutor._PANTS_NG_BUILDROOT_ARG]
)
self.assertTrue(self.executor.is_alive())
mock_as_process.assert_called_with(self.executor)
def test_is_alive_override_not_my_process(self):
with mock.patch.object(NailgunExecutor, '_as_process', **PATCH_OPTS) as mock_as_process:
mock_as_process.return_value = fake_process(
name='java',
pid=3,
status=psutil.STATUS_IDLE,
cmdline=['java', '-arg', '-arg2']
)
self.assertFalse(self.executor.is_alive())
mock_as_process.assert_called_with(self.executor)
| baroquebobcat/pants | tests/python/pants_test/java/test_nailgun_executor.py | Python | apache-2.0 | 1,934 |
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2020 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import os
import pytest
from scripts import importer
_samples = 'tests/unit/scripts/importer_sample'
def qm_expected(input_format):
"""Read expected quickmark-formatted output."""
with open(os.path.join(_samples, input_format, 'quickmarks'),
'r', encoding='utf-8') as f:
return f.read()
def bm_expected(input_format):
"""Read expected bookmark-formatted output."""
with open(os.path.join(_samples, input_format, 'bookmarks'),
'r', encoding='utf-8') as f:
return f.read()
def search_expected(input_format):
"""Read expected search-formatted (config.py) output."""
with open(os.path.join(_samples, input_format, 'config_py'),
'r', encoding='utf-8') as f:
return f.read()
def sample_input(input_format):
"""Get the sample input path."""
return os.path.join(_samples, input_format, 'input')
def test_opensearch_convert():
urls = [
# simple search query
('http://foo.bar/s?q={searchTerms}', 'http://foo.bar/s?q={}'),
# simple search query with supported additional parameter
('http://foo.bar/s?q={searchTerms}&enc={inputEncoding}',
'http://foo.bar/s?q={}&enc=UTF-8'),
# same as above but with supported optional parameter
('http://foo.bar/s?q={searchTerms}&enc={inputEncoding?}',
'http://foo.bar/s?q={}&enc='),
# unsupported-but-optional parameter
('http://foo.bar/s?q={searchTerms}&opt={unsupported?}',
'http://foo.bar/s?q={}&opt='),
# unsupported-but-optional subset parameter
('http://foo.bar/s?q={searchTerms}&opt={unsupported:unsupported?}',
'http://foo.bar/s?q={}&opt=')
]
for os_url, qb_url in urls:
assert importer.opensearch_convert(os_url) == qb_url
def test_opensearch_convert_unsupported():
"""pass an unsupported, required parameter."""
with pytest.raises(KeyError):
os_url = 'http://foo.bar/s?q={searchTerms}&req={unsupported}'
importer.opensearch_convert(os_url)
def test_chrome_bookmarks(capsys):
"""Read sample bookmarks from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['bookmark'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('chrome')
def test_chrome_quickmarks(capsys):
"""Read sample bookmarks from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['bookmark'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('chrome')
def test_chrome_searches(capsys):
"""Read sample searches from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('chrome')
def test_netscape_bookmarks(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['bookmark', 'keyword'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('netscape')
def test_netscape_quickmarks(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['bookmark', 'keyword'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('netscape')
def test_netscape_searches(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('netscape')
def test_mozilla_bookmarks(capsys):
importer.import_moz_places(
sample_input('mozilla'), ['bookmark', 'keyword'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('mozilla')
def test_mozilla_quickmarks(capsys):
importer.import_moz_places(
sample_input('mozilla'), ['bookmark', 'keyword'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('mozilla')
def test_mozilla_searches(capsys):
importer.import_moz_places(sample_input('mozilla'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('mozilla')
| t-wissmann/qutebrowser | tests/unit/scripts/test_importer.py | Python | gpl-3.0 | 4,963 |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .infoview import InfoBoxDialog
import json
__author__ = "C. Wilhelm"
___license___ = "GPL v3"
class IntuitiveTreeView(QTreeView):
"""Variant of QTreeView with setStretchFirstSection() method"""
def __init__(self, parent=None):
QTreeView.__init__(self, parent)
self.hv = self.header()
self.hv.installEventFilter(self)
self.hv.sectionResized.connect(self.sectionWasResized)
self.setStretchLastSection = self.hv.setStretchLastSection
self.setStretchLastSection(False)
self._stretch_first = False
def eventFilter(self, obj, event):
# http://qt-project.org/doc/qt-5.0/qtcore/qobject.html#installEventFilter
if self._stretch_first and obj == self.hv and event.type() == QEvent.Resize:
self.stretchSection()
return True
return QObject.eventFilter(self, obj, event)
def sectionWasResized(self, logicalIndex, oldSize, newSize):
if not self._stretch_first or 0 in (oldSize, newSize):
return
v_this = self.hv.visualIndex(logicalIndex)
l_next = self.hv.logicalIndex(v_this + 1)
self.stretchSection(l_next)
def stretchSection(self, logicalIndex=None):
if logicalIndex is None:
logicalIndex = self.hv.logicalIndexAt(0)
unused_width = self.hv.size().width() - self.hv.length()
new_width = self.hv.sectionSize(logicalIndex) + unused_width
if new_width > self.hv.minimumSectionSize():
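			# Block the header's signals so resizeSection() does not re-trigger
			# sectionWasResized() and recurse back into stretchSection().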
oldState = self.hv.blockSignals(True)
self.hv.resizeSection(logicalIndex, new_width)
self.hv.blockSignals(oldState)
def setStretchFirstSection(self, stretch_first=True):
# quasi-counterpart for setStretchLastSection() (which is actually part of QHeaderView)
# subclassing QHeaderView is no option here, as the QTreeView defaults would be lost!
self._stretch_first = stretch_first
def setNoStretch(self):
self.setStretchFirstSection(False)
self.setStretchLastSection(False)
class QueueTreeView(IntuitiveTreeView):
_ignored_columns = [] # columns that would break the table layout, e.g. multiline descriptions, thumbnails
_visible_columns = [] # columns that weren't deselected by the user or by default NOTE: order is relevant!
_always_visible_column = 'Filename'
def __init__(self, main_window, qsettings_object, model):
IntuitiveTreeView.__init__(self, main_window)
self.setModel(model)
self.settings = qsettings_object
self.loadSettings()
main_window.aboutToQuit.connect(self.writeSettings)
# Configure Header
self.hv.setContextMenuPolicy(Qt.CustomContextMenu)
self.hv.customContextMenuRequested.connect(self.chooseColumns)
self.hv.setMaximumHeight(21)
self.setStretchFirstSection(True)
# Setup Context Menu
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showContextMenu)
self.infobox = InfoBoxDialog(self, self.model())
# Other basic configuration
# self.setAlternatingRowColors(True) # Somehow doesn't work too well when Delegates are used
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setDragDropMode(QAbstractItemView.DragDrop)
self.setDropIndicatorShown(True)
def loadSettings(self):
self.settings.beginGroup(self.__class__.__name__)
visible_columns = self.settings.value("VisibleColumns", self._visible_columns)
if not isinstance(visible_columns, list):
visible_columns = json.loads(visible_columns)
self.initColumns(visible_columns)
self.settings.endGroup()
def writeSettings(self):
self.settings.beginGroup(self.__class__.__name__)
		visible_columns = [None] * len(self.model()._columns) # initialize to avoid Index Errors
for i, column_title in enumerate(self.model()._columns):
if not self.isColumnHidden(i):
j = self.hv.visualIndex(i)
visible_columns[j] = column_title
visible_columns = list(filter(None, visible_columns)) # remove None Values
self.settings.setValue("VisibleColumns", json.dumps(visible_columns))
self.settings.endGroup()
def initColumns(self, visible_columns=[]):
self.hv.blockSignals(True)
self.columnMenu = QMenu()
for i, column_title in enumerate(self.model()._columns):
if column_title in self._ignored_columns:
self.setColumnHidden(i, True)
continue
qa = QAction(column_title, self, checkable=True, triggered=self.toggleColumn)
if column_title in visible_columns:
self.setColumnHidden(i, False)
qa.setChecked(True)
else:
self.setColumnHidden(i, True)
qa.setChecked(False)
if column_title != self._always_visible_column:
self.columnMenu.addAction(qa)
self.columnMenu.addSeparator()
self.columnMenu.addAction(QAction("Reset Defaults", self, triggered=self.resetColumnDefaults))
for i, column_title in enumerate(visible_columns):
j = self.model()._columns.index(column_title)
k = self.hv.logicalIndex(j)
self.hv.swapSections(k, i)
self.hv.blockSignals(False)
def resetColumnDefaults(self):
self.hv.blockSignals(True)
for i, column_title in enumerate(self.model()._columns):
k = self.hv.logicalIndex(i)
self.hv.resizeSection(k, self.hv.defaultSectionSize())
self.hv.swapSections(k, i)
self.hv.blockSignals(False)
self.initColumns(self._visible_columns)
self.stretchSection()
def toggleColumn(self, column_title=None):
if column_title is None:
column_title = self.sender().text()
i = self.model()._columns.index(column_title)
self.setColumnHidden(i, not self.isColumnHidden(i))
self.stretchSection()
def chooseColumns(self, pos):
globalPos = self.mapToGlobal(pos)
self.columnMenu.exec_(globalPos)
def showInfo(self):
self.infobox.open_for_selection(self.selectedIndexes()[0])
def removeSelected(self):
self.model().removeScatteredRows(self.selectionModel().selectedRows())
def removeAll(self):
self.model().removeAll()
def showContextMenu(self, pos):
raise NotImplementedError
| valmynd/MediaFetcher | src/views/viewbase.py | Python | gpl-3.0 | 5,882 |
from Classes.Meta import Meta
import re
class Parser:
email_regex = re.compile(Meta.REGEX_EMAIL)
mobile_phone_regex = re.compile(Meta.REGEX_MOBILE_PHONE)
home_phone_regex = re.compile(Meta.REGEX_PHONE_HOME)
lat_regex = re.compile(Meta.REGEX_LATITUDE)
long_regex = re.compile(Meta.REGEX_LONGITUDE)
file = None
def __init__(self, file):
self.file = file
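# Usage sketch (hypothetical file/content; assumes the Meta regex constants
# referenced above are plain regex strings):
#   p = Parser(open('contacts.txt'))
#   emails = Parser.email_regex.findall(p.file.read())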
| nvangijzen/android_data_parser | Classes/Parser/Parser.py | Python | mit | 394 |
from django.contrib.auth import get_user_model
from django.contrib.auth import views as auth_views
from django.core.urlresolvers import resolve
from django.test import Client, TestCase
User = get_user_model()
class TestLoggedUser(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user('[email protected]', 'secret')
self.user.save()
        self.client.login(username='[email protected]', password='secret')
def tearDown(self):
self.user.delete()
def test_new_view_test_addflow(self):
response = resolve('/')
self.assertEqual(response.func.func_name, auth_views.login.func_name)
# def test_renders_template_addflow(self):
# url = reverse('rep-add-flow')
# response = self.request_factory.post(url)
# self.assertTemplateUsed(response, '/pariwana/recepcion/addflow')
#
# def test_logged_user_get_homepage(self):
# print(self.user)
# response = self.client.get(reverse('/'), follow=True)
# self.assertEqual(response.status_code, 200)
# def test_logged_user_get_settings(self):
# response = self.client.get(reverse('/settings/'), follow=True)
# self.assertEqual(response.status_code, 200)
| jonaqp/heroku | core/tests/tests_views.py | Python | mit | 1,254 |
# Tests need to be a package otherwise ipyparallel will not find them in the package,
# when trying to import the tests in the subprocesses.
# Therefore, LEAVE THIS FILE HERE
| giacomov/3ML | threeML/test/__init__.py | Python | bsd-3-clause | 175 |
from apps.analyzer.models import Category, FeatureCategory
from django.db.models.aggregates import Sum
import math
class Classifier:
def __init__(self, user, feed, phrases):
self.user = user
self.feed = feed
self.phrases = phrases
def get_features(self, doc):
found = {}
for phrase in self.phrases:
if phrase in doc:
if phrase in found:
found[phrase] += 1
else:
found[phrase] = 1
return found
def increment_feature(self, feature, category):
count = self.feature_count(feature,category)
if count==0:
fc = FeatureCategory(user=self.user, feed=self.feed, feature=feature, category=category, count=1)
fc.save()
else:
fc = FeatureCategory.objects.get(user=self.user, feed=self.feed, feature=feature, category=category)
fc.count = count + 1
fc.save()
def feature_count(self, feature, category):
if isinstance(category, Category):
category = category.category
try:
feature_count = FeatureCategory.objects.get(user=self.user, feed=self.feed, feature=feature, category=category)
except FeatureCategory.DoesNotExist:
return 0
else:
return float(feature_count.count)
def increment_category(self,category):
count = self.category_count(category)
if count==0:
category = Category(user=self.user, feed=self.feed, category=category, count=1)
category.save()
else:
category = Category.objects.get(user=self.user, feed=self.feed, category=category)
category.count = count+1
category.save()
def category_count(self, category):
if not isinstance(category, Category):
try:
category_count = Category.objects.get(user=self.user, feed=self.feed, category=category)
except Category.DoesNotExist:
return 0
else:
category_count = category
return float(category_count.count)
    def categories(self):
        # Match the other queries: only this user's categories for this feed.
        categories = Category.objects.filter(user=self.user, feed=self.feed)
        return categories
def totalcount(self):
categories = Category.objects.filter(user=self.user, feed=self.feed).aggregate(sum=Sum('count'))
return categories['sum']
def train(self, item, category):
features = self.get_features(item)
# Increment the count for every feature with this category
for feature in features:
self.increment_feature(feature, category)
# Increment the count for this category
self.increment_category(category)
def feature_probability(self, feature, category):
if self.category_count(category) == 0:
return 0
# The total number of times this feature appeared in this
# category divided by the total number of items in this category
return self.feature_count(feature, category) / self.category_count(category)
def weighted_probability(self, feature, category, prf, weight=1.0, ap=0.5):
# Calculate current probability
basic_prob = prf(feature, category)
# Count the number of times this feature has appeared in all categories
totals = sum([self.feature_count(feature, c) for c in self.categories()])
# Calculate the weighted average
bp = ((weight*ap) + (totals*basic_prob)) / (weight+totals)
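        # Worked example (hypothetical numbers): weight=1.0, ap=0.5,
        # totals=3.0, basic_prob=0.8 gives bp = (0.5 + 2.4) / 4.0 = 0.725,
        # i.e. sparsely observed features are pulled toward the 0.5 prior.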
print feature, category, basic_prob, totals, bp
return bp
class FisherClassifier(Classifier):
def __init__(self, user, feed, phrases):
Classifier.__init__(self, user, feed, phrases)
self.minimums = {}
def category_probability(self, feature, category):
# The frequency of this feature in this category
clf = self.feature_probability(feature, category)
if clf==0:
return 0
        # The frequency of this feature in all the categories
        freqsum = sum([self.feature_probability(feature, c) for c in self.categories()])
# The probability is the frequency in this category divided by
# the overall frequency
p = clf / freqsum
return p
def fisher_probability(self, item, category):
# Multiply all the probabilities together
p = .5
features = self.get_features(item)
if features:
p = 1
for feature in features:
p *= (self.weighted_probability(feature, category, self.category_probability))
# Take the natural log and multiply by -2
fscore = -2*math.log(p)
# Use the inverse chi2 function to get a probability
return self.invchi2(fscore,len(features)*2)
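    # invchi2 is the upper-tail chi-square probability P(X >= chi) for even
    # df, via the series e**(-m) * sum(m**i / i!) for i in [0, df/2), m=chi/2;
    # Fisher's method feeds it -2*sum(ln p) with 2 degrees of freedom per feature.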
def invchi2(self, chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df//2):
term *= m / i
sum += term
return min(sum, 1.0)
def setminimum(self, category, min):
self.minimums[category] = min
def getminimum(self, category):
if category not in self.minimums:
return 0
return self.minimums[category]
    def classify(self, item, default=None):
        # Loop through looking for the best result
        best = default
        max_prob = 0.0
        print self.categories(), item
        for category in self.categories():
            p = self.fisher_probability(item, category)
            # Make sure it exceeds its minimum
            if p > self.getminimum(category) and p > max_prob:
                best = category
                max_prob = p
        return best | AlphaCluster/NewsBlur | apps/analyzer/classifier.py | Python | mit | 5,802 |