code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, "../")
import logging
import snakemq
import snakemq.link
import snakemq.packeter
import snakemq.messaging
import snakemq.message
def on_recv(conn, ident, message):
print("received from", conn, ident, message)
def on_drop(ident, message):
print("message dropped", ident, message)
snakemq.init_logging()
logger = logging.getLogger("snakemq")
logger.setLevel(logging.DEBUG)
ssl_cfg = snakemq.link.SSLConfig("../tests/unittests/testkey.pem",
"../tests/unittests/testcert.pem")
s = snakemq.link.Link()
s.add_listener(("", 4000), ssl_config=ssl_cfg)
pktr = snakemq.packeter.Packeter(s)
m = snakemq.messaging.Messaging("xlistener", "", pktr)
m.on_message_recv.add(on_recv)
m.on_message_drop.add(on_drop)
msg = snakemq.message.Message(b"hello", ttl=60)
m.send_message("xconnector", msg)
s.loop()
| dsiroky/snakemq | examples/messaging_listener.py | Python | mit | 915 |
# -*- coding: utf-8 -*-
from flask_assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"libs/bootstrap-colorpicker/dist/css/bootstrap-colorpicker.css",
"css/style.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.min.js",
"libs/bootstrap-colorpicker/dist/js/bootstrap-colorpicker.js",
"js/plugins.js",
"js/script.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| Jaza/colorsearchtest | colorsearchtest/assets.py | Python | apache-2.0 | 624 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from glance.openstack.common import excutils
from glance.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple (reloaded, data) where `reloaded` is a boolean
              specifying whether the data was re-read from the file.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
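# Illustrative usage sketch (the download_image helper is hypothetical, not part
# of this module): any exception raised inside the block removes the partial file.
#   with remove_path_on_error('/tmp/image.part'):
#       download_image('/tmp/image.part')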
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
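# Illustrative usage sketch (hypothetical values, not part of the original module):
#   conf_path = write_to_tempfile(b"[DEFAULT]\ndebug = true\n", suffix='.conf')
#   ... use conf_path ..., then delete_if_exists(conf_path) to clean up.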
| redhat-openstack/glance | glance/openstack/common/fileutils.py | Python | apache-2.0 | 3,853 |
"""distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
__revision__ = "$Id$"
class DistutilsError (Exception):
"""The root of all Distutils evil."""
pass
class DistutilsModuleError (DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
pass
class DistutilsClassError (DistutilsError):
"""Some command class (or possibly distribution class, if anyone
feels a need to subclass Distribution) is found not to be holding
up its end of the bargain, ie. implementing some part of the
"command "interface."""
pass
class DistutilsGetoptError (DistutilsError):
"""The option table provided to 'fancy_getopt()' is bogus."""
pass
class DistutilsArgError (DistutilsError):
"""Raised by fancy_getopt in response to getopt.error -- ie. an
error in the command line usage."""
pass
class DistutilsFileError (DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before IOError or
OSError could be raised."""
pass
class DistutilsOptionError (DistutilsError):
"""Syntactic/semantic errors in command options, such as use of
mutually conflicting options, or inconsistent options,
badly-spelled values, etc. No distinction is made between option
values originating in the setup script, the command line, config
files, or what-have-you -- but if we *know* something originated in
the setup script, we'll raise DistutilsSetupError instead."""
pass
class DistutilsSetupError (DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
pass
class DistutilsPlatformError (DistutilsError):
"""We don't know how to do something on the current platform (but
we do know how to do it on some platform) -- eg. trying to compile
C files on a platform not supported by a CCompiler subclass."""
pass
class DistutilsExecError (DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
pass
class DistutilsInternalError (DistutilsError):
"""Internal inconsistencies or impossibilities (obviously, this
should never be seen if the code is working!)."""
pass
class DistutilsTemplateError (DistutilsError):
"""Syntax error in a file list template."""
class DistutilsByteCompileError(DistutilsError):
"""Byte compile error."""
# Exception classes used by the CCompiler implementation classes
class CCompilerError (Exception):
"""Some compile/link operation failed."""
class PreprocessError (CCompilerError):
"""Failure to preprocess one or more C/C++ files."""
class CompileError (CCompilerError):
"""Failure to compile one or more C/C++ source files."""
class LibError (CCompilerError):
"""Failure to create a static library from one or more C/C++ object
files."""
class LinkError (CCompilerError):
"""Failure to link one or more C/C++ object files into an executable
or shared library file."""
class UnknownFileError (CCompilerError):
"""Attempt to process an unknown file type."""
| edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/distutils/errors.py | Python | gpl-2.0 | 3,710 |
from unittest import skipIf
from django.test import TestCase
from django.test.utils import override_settings
from django.db import connection
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
recorder.applied_migrations(),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set([("myapp", "0432_ponies")]),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new AppCache.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[("migrations", "0001_initial"), ("migrations", "0002_second")],
)
# Now render it out!
project_state = migration_loader.graph.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.import_error"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
MigrationLoader(connection)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
MigrationLoader(connection)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len(migration_loader.graph.nodes),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len(migration_loader.graph.nodes),
2,
)
recorder.flush()
| rogerhu/django | tests/migrations/test_loader.py | Python | bsd-3-clause | 4,238 |
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path
import json
from mock import MagicMock, call
from ansible.compat.tests import unittest
from ansible.plugins.cliconf import slxos
FIXTURE_DIR = b'%s/fixtures/slxos' % (
path.dirname(path.abspath(__file__)).encode('utf-8')
)
def _connection_side_effect(*args, **kwargs):
try:
if args:
value = args[0]
else:
value = kwargs.get('command')
fixture_path = path.abspath(
b'%s/%s' % (FIXTURE_DIR, b'_'.join(value.split(b' ')))
)
with open(fixture_path, 'rb') as file_desc:
return file_desc.read()
except (OSError, IOError):
if args:
value = args[0]
return value
elif kwargs.get('command'):
value = kwargs.get('command')
return value
return 'Nope'
class TestPluginCLIConfSLXOS(unittest.TestCase):
""" Test class for SLX-OS CLI Conf Methods
"""
def setUp(self):
self._mock_connection = MagicMock()
self._mock_connection.send.side_effect = _connection_side_effect
self._cliconf = slxos.Cliconf(self._mock_connection)
def tearDown(self):
pass
def test_get_device_info(self):
""" Test get_device_info
"""
device_info = self._cliconf.get_device_info()
mock_device_info = {
'network_os': 'slxos',
'network_os_model': 'BR-SLX9140',
'network_os_version': '17s.1.02',
}
self.assertEqual(device_info, mock_device_info)
def test_get_config(self):
""" Test get_config
"""
running_config = self._cliconf.get_config()
fixture_path = path.abspath(b'%s/show_running-config' % FIXTURE_DIR)
with open(fixture_path, 'rb') as file_desc:
mock_running_config = file_desc.read()
self.assertEqual(running_config, mock_running_config)
startup_config = self._cliconf.get_config()
fixture_path = path.abspath(b'%s/show_running-config' % FIXTURE_DIR)
with open(fixture_path, 'rb') as file_desc:
mock_startup_config = file_desc.read()
self.assertEqual(startup_config, mock_startup_config)
def test_edit_config(self):
""" Test edit_config
"""
test_config_command = b'this\nis\nthe\nsong\nthat\nnever\nends'
self._cliconf.edit_config(test_config_command)
send_calls = []
for command in [b'configure terminal', test_config_command, b'end']:
send_calls.append(call(
command=command,
prompt_retry_check=False,
sendonly=False,
newline=True
))
self._mock_connection.send.assert_has_calls(send_calls)
def test_get_capabilities(self):
""" Test get_capabilities
"""
capabilities = json.loads(self._cliconf.get_capabilities())
mock_capabilities = {
'network_api': 'cliconf',
'rpc': [
'get_config',
'edit_config',
'get_capabilities',
'get'
],
'device_info': {
'network_os_model': 'BR-SLX9140',
'network_os_version': '17s.1.02',
'network_os': 'slxos'
}
}
self.assertEqual(
mock_capabilities,
capabilities
)
| mheap/ansible | test/units/plugins/cliconf/test_slxos.py | Python | gpl-3.0 | 4,208 |
import random, sys, json
class Quotes:
def __init__(self, owner, logger, header):
self.owner = owner
self.quote_list = []
self.logger = logger
self.header = header
    # The load method takes the path of a JSON quotes file as its argument.
    # It reads and parses the file, then stores the list of quotes found under
    # the "all" key in self.quote_list. Each quote is expected to be an object
    # with "quote" and "person" fields (see generate() below).
def load(self, quote_file):
try:
with open(quote_file) as quote_json:
quotes = json.load(quote_json)
self.quote_list = quotes.get("all", [])
except IOError:
self.logger.fail("Failed to open quotes file, closing program.")
sys.exit(1)
# Generates and returns the quote string used in the process queue function
def generate(self):
quote = "##" + self.header + ":##\n\n >"
randomNum = random.randint(0,len(self.quote_list)-1)
raw_quote = self.quote_list[randomNum]
quote += raw_quote.get("quote", None) + "\n"
quote += '#####' + raw_quote.get("person", None) + '#####'
quote += "\n\n*****"
quote += "\n\n^^***If*** ^^***you*** ^^***would*** ^^***like*** ^^***to*** "
quote += "^^***add*** ^^***a*** ^^***quote***, ^^***please*** ^^***contact*** "
quote += "^^***my*** ^^***owner:*** ^^/u/" + self.owner
        return quote
| joeYeager/reddit-quote-bot | src/quote.py | Python | mit | 1,546 |
import os
import os.path
import time
import logging
import importlib
import multiprocessing
from configparser import SafeConfigParser, NoOptionError
from wikked.db.base import DatabaseUpgradeRequired
from wikked.endpoint import create_endpoint_infos
from wikked.fs import FileSystem
from wikked.auth import UserManager
from wikked.scheduler import ResolveScheduler
logger = logging.getLogger(__name__)
def passthrough_formatter(text):
""" Passthrough formatter. Pretty simple stuff. """
return text
class InitializationError(Exception):
""" An exception that can get raised while the wiki gets
initialized.
"""
pass
NORMAL_CONTEXT = 0
INIT_CONTEXT = 1
BACKGROUND_CONTEXT = 2
def synchronous_wiki_updater(wiki, url):
logger.debug("Synchronous wiki update: update all pages because %s was "
"edited." % url)
wiki.updateAll()
class WikiParameters(object):
""" An object that defines how a wiki gets initialized.
"""
def __init__(self, root=None, ctx=NORMAL_CONTEXT):
if root is None:
root = os.getcwd()
self.root = root
self.context = ctx
self.custom_heads = {}
self.wiki_updater = synchronous_wiki_updater
self._config = None
self._index_factory = None
self._scm_factory = None
self._formatters = None
@property
def config(self):
if self._config is None:
self._config = self._loadConfig()
return self._config
def fs_factory(self):
return FileSystem(self.root, self.config)
def index_factory(self):
self._ensureIndexFactory()
return self._index_factory()
def db_factory(self):
from wikked.db.sql import SQLDatabase
return SQLDatabase(self.config)
def scm_factory(self):
self._ensureScmFactory()
return self._scm_factory()
def auth_factory(self):
return UserManager(self.config)
@property
def formatters(self):
if self._formatters is None:
self._formatters = {}
self.formatters[passthrough_formatter] = ['txt', 'html']
self.tryAddMarkdownFormatter()
self.tryAddFormatter('textile', 'textile',
['tl', 'text', 'textile'])
self.tryAddFormatter('creole', 'creole2html',
['cr', 'creole'])
self.tryAddFountainFormatter()
return self._formatters
def getSpecialFilenames(self):
yield '.wikirc'
yield '.wiki'
yield '_files'
if self.config.has_section('ignore'):
for name, val in self.config.items('ignore'):
yield val
def tryAddFormatter(self, module_name, module_func, extensions):
try:
module = importlib.import_module(module_name)
func = getattr(module, module_func)
self._formatters[func] = extensions
except ImportError:
pass
def tryAddMarkdownFormatter(self,):
try:
import markdown
except ImportError:
return
from markdown.util import etree
class HeaderAnchorsTreeprocessor(
markdown.treeprocessors.Treeprocessor):
HEADER_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
def run(self, root):
hd_tags = self.HEADER_TAGS
for elem in root.iter():
if elem.tag in hd_tags:
hd_id = elem.text.lower().replace(' ', '-')
hd_id = elem.attrib.setdefault('id', hd_id)
elem.append(etree.Element(
'a',
{'class': 'wiki-header-link',
'href': '#%s' % hd_id}))
class HeaderAnchorsExtension(markdown.extensions.Extension):
def extendMarkdown(self, md, *args, **kwargs):
md.treeprocessors.register(
HeaderAnchorsTreeprocessor(md),
'header_anchors',
100)
class _MarkdownWrapper:
def __init__(self, md):
self._md = md
def __call__(self, text):
self._md.reset()
return self._md.convert(text)
exts = self.config.get('markdown', 'extensions').split(',')
exts.append(HeaderAnchorsExtension())
md = markdown.Markdown(extensions=exts)
md_wrapper = _MarkdownWrapper(md)
self._formatters[md_wrapper] = ['md', 'mdown', 'markdown']
def tryAddFountainFormatter(self):
try:
from jouvence.parser import JouvenceParser
from jouvence.html import HtmlDocumentRenderer, get_css
except ImportError:
return
import io
def _jouvence_to_html(text):
parser = JouvenceParser()
document = parser.parseString(text)
rdr = HtmlDocumentRenderer(standalone=False)
with io.StringIO() as fp:
rdr.render_doc(document, fp)
return fp.getvalue()
self._formatters[_jouvence_to_html] = ['fountain']
head_css = ('<link rel="stylesheet" type="text/css" '
'href="/static/css/jouvence.css" />\n')
head_css += '<style>%s</style>' % get_css()
self.custom_heads = {'fountain': head_css}
def _loadConfig(self):
# Merge the default settings with any settings provided by
# the local config file(s).
config_path = os.path.join(self.root, '.wikirc')
local_config_path = os.path.join(self.root, '.wiki', 'wikirc')
default_config_path = os.path.join(
os.path.dirname(__file__), 'resources', 'defaults.cfg')
config = SafeConfigParser()
config.readfp(open(default_config_path))
config.set('wiki', 'root', self.root)
config.read([config_path, local_config_path])
return config
def _ensureIndexFactory(self):
if self._index_factory is None:
index_type = self.config.get('wiki', 'indexer')
if index_type == 'whoosh':
def impl():
from wikked.indexer.whooshidx import WhooshWikiIndex
return WhooshWikiIndex()
self._index_factory = impl
elif index_type == 'elastic':
def impl():
from wikked.indexer.elastic import ElasticWikiIndex
return ElasticWikiIndex()
self._index_factory = impl
else:
raise InitializationError("No such indexer: " + index_type)
def _ensureScmFactory(self):
if self._scm_factory is None:
try:
scm_type = self.config.get('wiki', 'sourcecontrol')
except NoOptionError:
# Auto-detect
if os.path.isdir(os.path.join(self.root, '.hg')):
scm_type = 'hg'
elif os.path.isdir(os.path.join(self.root, '.git')):
scm_type = 'git'
else:
# Default to Mercurial. Yes. I just decided that myself.
scm_type = 'hg'
if self.context == INIT_CONTEXT and scm_type == 'hg':
# Quick workaround for when we're creating a new repo,
# or running background tasks.
# We'll be using the `hg` process instead of the command
# server, since there's no repo there yet, or we just don't
# want to spawn a new process unless we want to.
logger.debug("Forcing `hgexe` source-control for new repo.")
scm_type = 'hgexe'
if scm_type == 'hg':
def impl():
from wikked.scm.mercurial import \
MercurialCommandServerSourceControl
return MercurialCommandServerSourceControl(self.root)
self._scm_factory = impl
elif scm_type == 'hgexe':
def impl():
from wikked.scm.mercurial import MercurialSourceControl
return MercurialSourceControl(self.root)
self._scm_factory = impl
elif scm_type == 'git':
def impl():
from wikked.scm.git import GitLibSourceControl
return GitLibSourceControl(self.root)
self._scm_factory = impl
else:
raise InitializationError(
"No such source control: " + scm_type)
class Wiki(object):
""" The wiki class! This is where the magic happens.
"""
def __init__(self, parameters):
""" Creates a new wiki instance. It won't be fully functional
until you call `start`, which does the actual initialization.
This gives you a chance to customize a few more things before
getting started.
"""
if parameters is None:
raise ValueError("No parameters were given to the wiki.")
self.formatters = parameters.formatters
self.custom_heads = parameters.custom_heads
self.special_filenames = parameters.getSpecialFilenames()
self.main_page_url = (
'/' +
parameters.config.get('wiki', 'main_page').strip('/'))
self.templates_url = (
parameters.config.get('wiki', 'templates_endpoint') +
':/')
self.endpoints = create_endpoint_infos(parameters.config)
self.fs = parameters.fs_factory()
self.index = parameters.index_factory()
self.db = parameters.db_factory()
self.scm = parameters.scm_factory()
self.auth = parameters.auth_factory()
self._wiki_updater = parameters.wiki_updater
self.post_update_hooks = []
@property
def root(self):
return self.fs.root
def start(self, update=False):
""" Properly initializes the wiki and all its sub-systems.
"""
order = [self.fs, self.scm, self.index, self.db, self.auth]
for o in order:
o.start(self)
if update:
self.updateAll()
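    # Illustrative usage sketch (hypothetical wiki root, not part of the original
    # module): construct parameters, then a Wiki, then start it.
    #   params = WikiParameters('/path/to/wikiroot')
    #   wiki = Wiki(params)
    #   wiki.start(update=True)   # starts fs, scm, index, db, auth, then updates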
def init(self):
""" Creates a new wiki at the specified root directory.
"""
order = [self.fs, self.scm, self.index, self.db, self.auth]
for o in order:
o.init(self)
self.start()
for o in order:
o.postInit()
def stop(self, exception=None):
""" De-initializes the wiki and its sub-systems.
"""
self.db.close(exception)
def reset(self, parallel=False):
""" Clears all the cached data and rebuilds it from scratch.
"""
logger.info("Resetting wiki data...")
page_infos = self.fs.getPageInfos()
self.db.reset(page_infos)
self.resolve(force=True, parallel=parallel)
self.index.reset(self.getPages())
def resolve(self, only_urls=None, force=False, parallel=False):
""" Compute the final info (text, meta, links) of all or a subset of
the pages, and caches it in the DB.
"""
logger.debug("Resolving pages...")
if only_urls:
page_urls = only_urls
else:
page_urls = self.db.getPageUrls(uncached_only=(not force))
num_workers = multiprocessing.cpu_count() if parallel else 1
s = ResolveScheduler(self, page_urls)
s.run(num_workers)
def updatePage(self, url=None, path=None):
""" Completely updates a single page, i.e. read it from the file-system
and have it fully resolved and cached in the DB.
"""
if url and path:
raise Exception("Can't specify both an URL and a path.")
logger.info("Updating page: %s" % (url or path))
if path:
page_info = self.fs.getPageInfo(path)
else:
page_info = self.fs.findPageInfo(url)
self.db.updatePage(page_info)
self.resolve(only_urls=[page_info.url])
self.index.updatePage(self.db.getPage(
page_info.url,
fields=['url', 'path', 'title', 'text']))
def updateAll(self, parallel=False, reset_on_db_upgrade_required=True):
""" Completely updates all pages, i.e. read them from the file-system
and have them fully resolved and cached in the DB.
This function will check for timestamps to only update pages that
need it.
"""
logger.info("Updating all pages...")
try:
page_infos = self.fs.getPageInfos()
self.db.updateAll(page_infos)
self.resolve(parallel=parallel)
self.index.updateAll(self.db.getPages(
fields=['url', 'path', 'title', 'text']))
except DatabaseUpgradeRequired:
logger.info("Database upgrade required... running full reset.")
self.reset(parallel=parallel)
def getPageUrls(self, subdir=None):
""" Returns all the page URLs in the wiki, or in the given
sub-directory.
"""
for url in self.db.getPageUrls(subdir):
yield url
def getPages(self, subdir=None, meta_query=None,
endpoint_only=None, no_endpoint_only=False, fields=None):
""" Gets all the pages in the wiki, or in the given sub-directory.
"""
for page in self.db.getPages(
subdir=subdir,
meta_query=meta_query,
endpoint_only=endpoint_only,
no_endpoint_only=no_endpoint_only,
fields=fields):
yield page
def getPage(self, url, fields=None):
""" Gets the page for a given URL.
"""
return self.db.getPage(url, fields=fields)
def setPage(self, url, page_fields):
""" Updates or creates a page for a given URL.
"""
# Validate the parameters.
if 'text' not in page_fields:
raise ValueError(
"No text specified for editing page '%s'." % url)
if 'author' not in page_fields:
raise ValueError(
"No author specified for editing page '%s'." % url)
if 'message' not in page_fields:
raise ValueError(
"No commit message specified for editing page '%s'." % url)
# Save the new/modified text.
page_info = self.fs.setPage(url, page_fields['text'])
# Commit the file to the source-control.
commit_meta = {
'author': page_fields['author'],
'message': page_fields['message']}
self.scm.commit([page_info.path], commit_meta)
# Update the DB and index with the new/modified page.
self.updatePage(path=page_info.path)
# Invalidate all page lists.
self.db.removeAllPageLists()
# Update all the other pages.
self._wiki_updater(self, url)
for hook in self.post_update_hooks:
hook(self, url)
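    # Illustrative usage sketch (hypothetical values, not part of the original
    # module): setPage requires 'text', 'author' and 'message' fields.
    #   wiki.setPage('/Sandbox', {'text': 'Hello world', 'author': 'anna',
    #                             'message': 'create sandbox page'})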
def revertPage(self, url, page_fields):
""" Reverts the page with the given URL to an older revision.
"""
# Validate the parameters.
if 'rev' not in page_fields:
raise ValueError(
"No revision specified for reverting page '%s'." % url)
if 'author' not in page_fields:
raise ValueError(
"No author specified for reverting page '%s'." % url)
if 'message' not in page_fields:
raise ValueError(
"No commit message specified for reverting page '%s'." % url)
# Get the revision.
path = self.fs.getPhysicalPagePath(url)
rev_text = self.scm.getRevision(path, page_fields['rev'])
# Write to the file and commit.
self.fs.setPage(url, rev_text)
# Commit to source-control.
commit_meta = {
'author': page_fields['author'],
'message': page_fields['message']}
self.scm.commit([path], commit_meta)
# Update the DB and index with the modified page.
self.updatePage(url)
# Update all the other pages.
self._wiki_updater(self, url)
for hook in self.post_update_hooks:
hook(self, url)
def pageExists(self, url):
""" Returns whether a page exists at the given URL.
"""
return self.db.pageExists(url)
def getHistory(self, limit=10, after_rev=None):
""" Shorthand method to get the history from the source-control.
"""
return self.scm.getHistory(limit=limit, after_rev=after_rev)
def getSpecialFilenames(self):
return self.special_filenames
def getEndpoints(self):
return self.endpoints.values()
def getEndpoint(self, name):
return self.endpoints.get(name)
def getBuiltinEndpoints(self):
for ep in self.endpoints.values():
if ep.builtin:
yield ep
def reloader_stat_loop(wiki, interval=1):
mtimes = {}
while 1:
for page_info in wiki.fs.getPageInfos():
path = page_info['path']
try:
mtime = os.stat(path).st_mtime
except OSError:
continue
old_time = mtimes.get(path)
if old_time is None:
mtimes[path] = mtime
continue
elif mtime > old_time:
print("Change detected in '%s'." % path)
time.sleep(interval)
| ludovicchabant/Wikked | wikked/wiki.py | Python | apache-2.0 | 17,591 |
## python 3.3 or later
from functools import wraps
def debug(funk):
    msg = funk.__qualname__
    @wraps(funk)
    def wrapper(*args, **kwargs):
        print(funk.__name__)
        print(msg)
        return funk(*args, **kwargs)
    return wrapper
# funk = debug(funk)  # manual application, equivalent to decorating funk with @debug
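# Illustrative usage (hypothetical function, not part of the original file):
#   @debug
#   def add(x, y):
#       return x + y
#   add(2, 3)   # prints the function's name and qualified name, then returns 5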
| abhishekkr/tutorials_as_code | talks-articles/languages-n-runtimes/python/PyConUS2013/Intro_to_MetaClasses/01-debugly.py | Python | mit | 252 |
"""The serialization logic"""
import cPickle
import copy
class InvalidFile(Exception): """The file used to thaw an item was not a valid serialized file"""
class Bag(object):
"""Bag to hold properties"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
# Types
class Int(int):
"""An int"""
class Float(float):
"""An float"""
class String(str):
"""A str"""
class List(list):
"""A list"""
class Dict(dict):
"""A dict"""
class Bool(int):
"""A boolean"""
class Obj(object):
"""An object"""
def initType(item, name, description=None):
"""Initialize the type"""
item.name = name
item.description = description if description else name
def I(name, value, description=None):
v = Int(value)
initType(v, name, description)
return v
def F(name, value, description=None):
v = Float(value)
initType(v, name, description)
return v
def S(name, value, description=None):
v = String(value)
initType(v, name, description)
return v
def L(name, value, description=None):
v = List(value)
initType(v, name, description)
return v
def D(name, value, description=None):
v = Dict(value)
initType(v, name, description)
return v
def B(name, value, description=None):
v = Bool(value)
initType(v, name, description)
return v
def O(name, value, description=None):
v = Obj()
initType(v, name, description)
return v
class Serializable(object):
"""A mixing class to help serialize and deserialize objects"""
# This is where you put the properties that your object has
# This should be a list of tuples
# name, default value, type, description
my_properties = ()
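    # Illustrative sketch (not part of the original module): a subclass might
    # declare its properties with the typed helpers defined above, e.g.
    #
    #   class Player(Serializable):
    #       my_properties = (
    #           S('name', 'anonymous', 'The player name'),
    #           I('score', 0, 'Current score'),
    #           B('active', True, 'Whether the player is active'),
    #       )
    #
    #   p = Player.createInstance()   # p.name == 'anonymous', p.score == 0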
@classmethod
def createInstance(cls):
"""Return an instance of the class with all default properties set"""
instance = cls()
instance.__setstate__()
return instance
@classmethod
def _getProperties(cls):
"""Get the properties all the way up the inheritance tree"""
props = dict([(obj.name, obj) for obj in cls.my_properties])
for the_cls in cls.__bases__:
if issubclass(the_cls, Serializable):
for key, value in the_cls._getProperties():
if key not in props:
props[key] = value
return props.iteritems()
def __getstate__(self):
"""Return the live properties suitable for pickling"""
values = []
for name, _ in self.__class__._getProperties():
values.append((name, getattr(self, name)))
return values
def __setstate__(self, state=None):
"""Initialize the object to the given state for unpickling"""
self.initial_properties = Bag()
#
# Initialize first from the defaults and then from the live state
for this_state in (self.__class__._getProperties(), state):
if this_state:
for name, value in this_state:
setattr(self, name, value)
setattr(self.initial_properties, name, value)
def asString(self):
"""Return the properties of this object as a string"""
return cPickle.dumps(self, protocol=2)
def toFile(self, filename):
"""Store this object in a file"""
with file(filename, 'wb') as f:
f.write(self.asString())
@classmethod
def fromString(cls, text):
"""Return a new instance from a string"""
obj = cPickle.loads(text)
obj.init()
return obj
@classmethod
def fromFile(cls, filename):
"""Return a new instance from a file"""
with file(filename, 'rb') as f:
try:
return cls.fromString(f.read())
except Exception, err:
raise InvalidFile('Failed to load data from file "%s": %s' % (filename, err))
def init(self):
"""Implement this method to do any object initialization after unpickling"""
pass
def copy(self):
"""Return another copy of this item"""
return self.fromString(self.asString())
class SerializedBag(object):
"""A bag that can be serialized"""
def __init__(self, **kw):
"""Initialise the bag"""
for name, value in kw.iteritems():
setattr(self, name, value)
def init(self):
"""Initialise - here to meet the Serialized protocol"""
pass
def copy(self):
"""Return a copy"""
return copy.deepcopy(self)
| smmosquera/serge | serialize.py | Python | lgpl-3.0 | 4,746 |
from rest_framework import serializers
from cms.common import mixins
from .models import NavModule, NavModuleItem
class NavModuleItemSerializer(serializers.ModelSerializer):
name = serializers.ReadOnlyField(source='get_name')
route = serializers.ReadOnlyField()
class Meta:
model = NavModuleItem
fields = ('name', 'route', 'scroll_to_element', 'link',
'link_open_in_new_tab')
def to_representation(self, obj):
data = super(NavModuleItemSerializer, self).to_representation(obj)
if not obj.route:
data.pop('route')
if not obj.scroll_to_element:
data.pop('scroll_to_element')
if not obj.link:
data.pop('link')
if not obj.link_open_in_new_tab:
data.pop('link_open_in_new_tab')
return data
class NavModuleSerializer(mixins.ModuleSerializer):
items = NavModuleItemSerializer(many=True, source='get_items')
class Meta:
model = NavModule
fields = ('pk', 'name', 'module_name_header_level', 'html_class',
'items')
| HurtowniaPixeli/pixelcms-server | cms/nav/serializers.py | Python | mit | 1,101 |
'''
Fetch simplified WKT boundaries for 2014 congressional districts and
save in CSV format:
state,district,polygon
'''
import requests
import csv
BASE_URL = "https://gis.govtrack.us"
CD_2014_URL = "/boundaries/cd-2014/?limit=500"
# get meta boundary
r = requests.get(BASE_URL + CD_2014_URL)
j = r.json()
boundaries = j['objects']
with open('cb_2014_districts.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['state', 'district', 'polygon'])
for b in boundaries:
p = str.split(b['name'], '-')
r = requests.get(BASE_URL + b['url'] + 'simple_shape?format=wkt')
wkt = r.text
writer.writerow([p[0], p[1], wkt])
| legis-graph/legis-graph | src/fetchDistricts.py | Python | mit | 671 |
"""
Contains the protocols, commands, and client factory needed for the Server
and Portal to communicate with each other, letting Portal work as a proxy.
Both sides use this same protocol.
The separation works like this:
Portal - (AMP client) handles protocols. It contains a list of connected
sessions in a dictionary for identifying the respective player
connected. If it loses the AMP connection it will automatically
try to reconnect.
Server - (AMP server) Handles all mud operations. The server holds its own list
of sessions tied to player objects. This is synced against the portal
at startup and when a session connects/disconnects
"""
# imports needed on both server and portal side
import os
from collections import defaultdict
from textwrap import wrap
try:
import cPickle as pickle
except ImportError:
import pickle
from twisted.protocols import amp
from twisted.internet import protocol
from twisted.internet.defer import Deferred
from src.utils.utils import to_str, variable_from_module
# communication bits
PCONN = chr(1) # portal session connect
PDISCONN = chr(2) # portal session disconnect
PSYNC = chr(3) # portal session sync
SLOGIN = chr(4) # server session login
SDISCONN = chr(5) # server session disconnect
SDISCONNALL = chr(6) # server session disconnect all
SSHUTD = chr(7) # server shutdown
SSYNC = chr(8) # server session sync
MAXLEN = 65535 # max allowed data length in AMP protocol
_MSGBUFFER = defaultdict(list)
def get_restart_mode(restart_file):
"""
Parse the server/portal restart status
"""
if os.path.exists(restart_file):
flag = open(restart_file, 'r').read()
return flag == "True"
return False
class AmpServerFactory(protocol.ServerFactory):
"""
This factory creates the Server as a new AMPProtocol instance for accepting
connections from the Portal.
"""
def __init__(self, server):
"""
server: The Evennia server service instance
protocol: The protocol the factory creates instances of.
"""
self.server = server
self.protocol = AMPProtocol
def buildProtocol(self, addr):
"""
Start a new connection, and store it on the service object
"""
#print "Evennia Server connected to Portal at %s." % addr
self.server.amp_protocol = AMPProtocol()
self.server.amp_protocol.factory = self
return self.server.amp_protocol
class AmpClientFactory(protocol.ReconnectingClientFactory):
"""
This factory creates an instance of the Portal, an AMPProtocol
instances to use to connect
"""
# Initial reconnect delay in seconds.
initialDelay = 1
factor = 1.5
maxDelay = 1
def __init__(self, portal):
self.portal = portal
self.protocol = AMPProtocol
def startedConnecting(self, connector):
"""
Called when starting to try to connect to the MUD server.
"""
pass
#print 'AMP started to connect:', connector
def buildProtocol(self, addr):
"""
Creates an AMPProtocol instance when connecting to the server.
"""
#print "Portal connected to Evennia server at %s." % addr
self.resetDelay()
self.portal.amp_protocol = AMPProtocol()
self.portal.amp_protocol.factory = self
return self.portal.amp_protocol
def clientConnectionLost(self, connector, reason):
"""
Called when the AMP connection to the MUD server is lost.
"""
if hasattr(self, "server_restart_mode"):
self.maxDelay = 1
else:
# Don't translate this; avoid loading django on portal side.
self.maxDelay = 10
self.portal.sessions.announce_all(" ... Portal lost connection to Server.")
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
"""
Called when an AMP connection attempt to the MUD server fails.
"""
if hasattr(self, "server_restart_mode"):
self.maxDelay = 1
else:
self.maxDelay = 10
self.portal.sessions.announce_all(" ...")
protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
# AMP Communication Command types
class MsgPortal2Server(amp.Command):
"""
Message portal -> server
"""
key = "MsgPortal2Server"
arguments = [('sessid', amp.Integer()),
('ipart', amp.Integer()),
('nparts', amp.Integer()),
('msg', amp.String()),
('data', amp.String())]
errors = [(Exception, 'EXCEPTION')]
response = []
class MsgServer2Portal(amp.Command):
"""
Message server -> portal
"""
key = "MsgServer2Portal"
arguments = [('sessid', amp.Integer()),
('ipart', amp.Integer()),
('nparts', amp.Integer()),
('msg', amp.String()),
('data', amp.String())]
errors = [(Exception, 'EXCEPTION')]
response = []
class ServerAdmin(amp.Command):
"""
Portal -> Server
Sent when the portal needs to perform admin
operations on the server, such as when a new
session connects or resyncs
"""
key = "ServerAdmin"
arguments = [('sessid', amp.Integer()),
('ipart', amp.Integer()),
('nparts', amp.Integer()),
('operation', amp.String()),
('data', amp.String())]
errors = [(Exception, 'EXCEPTION')]
response = []
class PortalAdmin(amp.Command):
"""
Server -> Portal
Sent when the server needs to perform admin
operations on the portal.
"""
key = "PortalAdmin"
arguments = [('sessid', amp.Integer()),
('ipart', amp.Integer()),
('nparts', amp.Integer()),
('operation', amp.String()),
('data', amp.String())]
errors = [(Exception, 'EXCEPTION')]
response = []
class FunctionCall(amp.Command):
"""
Bidirectional
Sent when either process needs to call an
arbitrary function in the other.
"""
key = "FunctionCall"
arguments = [('module', amp.String()),
('function', amp.String()),
('args', amp.String()),
('kwargs', amp.String())]
errors = [(Exception, 'EXCEPTION')]
response = [('result', amp.String())]
# Helper functions
dumps = lambda data: to_str(pickle.dumps(data, pickle.HIGHEST_PROTOCOL))
loads = lambda data: pickle.loads(to_str(data))
# multipart message store
#------------------------------------------------------------
# Core AMP protocol for communication Server <-> Portal
#------------------------------------------------------------
class AMPProtocol(amp.AMP):
"""
This is the protocol that the MUD server and the proxy server
communicate to each other with. AMP is a bi-directional protocol, so
both the proxy and the MUD use the same commands and protocol.
AMP specifies responder methods here and connect them to amp.Command
subclasses that specify the datatypes of the input/output of these methods.
"""
# helper methods
def connectionMade(self):
"""
This is called when a connection is established
between server and portal. AMP calls it on both sides,
so we need to make sure to only trigger resync from the
portal side.
"""
if hasattr(self.factory, "portal"):
# only the portal has the 'portal' property, so we know we are
# on the portal side and can initialize the connection.
sessdata = self.factory.portal.sessions.get_all_sync_data()
self.call_remote_ServerAdmin(0,
PSYNC,
data=sessdata)
self.factory.portal.sessions.at_server_connection()
if hasattr(self.factory, "server_restart_mode"):
del self.factory.server_restart_mode
# Error handling
def errback(self, e, info):
"error handler, to avoid dropping connections on server tracebacks."
f = e.trap(Exception)
print "AMP Error for %(info)s: %(e)s" % {'info': info,
'e': e.getErrorMessage()}
def safe_send(self, command, sessid, **kwargs):
"""
        This helper method splits the sending of a message into
        multiple parts, each with a maximum length of MAXLEN. This is
        to avoid repetition in the two sending commands. The length is
        checked for all kwargs, which are used as arguments to the
        command; if no part exceeds MAXLEN the command is sent in one
        piece. The command type must have keywords ipart and
        nparts to track the parts and put them back together on the
        other side.
Returns a deferred or a list of such
"""
to_send = [(key, [string[i:i+MAXLEN] for i in range(0, len(string), MAXLEN)])
for key, string in kwargs.items()]
nparts_max = max(len(part[1]) for part in to_send)
if nparts_max == 1:
# first try to send directly
return self.callRemote(command,
sessid=sessid,
ipart=0,
nparts=1,
**kwargs).addErrback(self.errback, command.key)
else:
# one or more parts were too long for MAXLEN.
#print "TooLong triggered!"
deferreds = []
for ipart in range(nparts_max):
part_kwargs = {}
for key, str_part in to_send:
try:
part_kwargs[key] = str_part[ipart]
except IndexError:
# means this kwarg needed fewer splits
part_kwargs[key] = ""
# send this part
#print "amp safe sending:", ipart, nparts_max, str_part
deferreds.append(self.callRemote(
command,
sessid=sessid,
ipart=ipart,
nparts=nparts_max,
**part_kwargs).addErrback(self.errback, command.key))
return deferreds
def safe_recv(self, command, sessid, ipart, nparts, **kwargs):
"""
        Safely receive potentially split data coming over the wire. No
decoding or parsing is done here, only merging of data split
with safe_send().
If the data stream is not yet complete, this method will return
None, otherwise it will return a dictionary of the (possibly
merged) properties.
"""
global _MSGBUFFER
if nparts == 1:
# the most common case
return kwargs
else:
# part of a multi-part send
hashid = "%s_%s" % (command.key, sessid)
#print "amp safe receive:", ipart, nparts-1, kwargs
if ipart < nparts-1:
# not yet complete
_MSGBUFFER[hashid].append(kwargs)
return
else:
# all parts in place, put them back together
buf = _MSGBUFFER.pop(hashid) + [kwargs]
recv_kwargs = dict((key, "".join(kw[key] for kw in buf)) for key in kwargs)
return recv_kwargs
# def send_split_msg(self, sessid, msg, data, command):
# """
# This helper method splits the sending of a msg into multiple parts
# with a maxlength of MAXLEN. This is to avoid repetition in the two
# msg-sending commands. When calling this, the maximum length has
# already been exceeded.
# Inputs:
# msg - string
# data - data dictionary
# command - one of MsgPortal2Server or MsgServer2Portal commands
# """
# # split the strings into acceptable chunks
# datastr = dumps(data)
# nmsg, ndata = len(msg), len(datastr)
# if nmsg > MAXLEN or ndata > MAXLEN:
# msglist = [msg[i:i + MAXLEN] for i in range(0, len(msg), MAXLEN)]
# datalist = [datastr[i:i + MAXLEN]
# for i in range(0, len(datastr), MAXLEN)]
# nmsglist, ndatalist = len(msglist), len(datalist)
# if ndatalist < nmsglist:
# datalist.extend("" for i in range(nmsglist - ndatalist))
# if nmsglist < ndatalist:
# msglist.extend("" for i in range(ndatalist - nmsglist))
# # we have split the msg/data into right-size chunks. Now we
# # send it in sequence
# return [self.callRemote(command,
# sessid=sessid,
# msg=to_str(msg),
# ipart=icall,
# nparts=nmsglist,
# data=dumps(data)).addErrback(self.errback, "MsgServer2Portal")
# for icall, (msg, data) in enumerate(zip(msglist, datalist))]
# Message definition + helper methods to call/create each message type
# Portal -> Server Msg
def amp_msg_portal2server(self, sessid, ipart, nparts, msg, data):
"""
Relays message to server. This method is executed on the Server.
Since AMP has a limit of 65355 bytes per message, it's possible the
data comes in multiple chunks; if so (nparts>1) we buffer the data
and wait for the remaining parts to arrive before continuing.
"""
#print "msg portal -> server (server side):", sessid, msg, data
ret = self.safe_recv(MsgPortal2Server, sessid, ipart, nparts,
text=msg, data=data)
if ret is not None:
self.factory.server.sessions.data_in(sessid,
text=ret["text"],
**loads(ret["data"]))
return {}
# global MSGBUFFER
# if nparts > 1:
# # a multipart message
# if len(MSGBUFFER[sessid]) != nparts:
# # we don't have all parts yet. Wait.
# return {}
# else:
# # we have all parts. Put it all together in the right order.
# msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
# data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
# del MSGBUFFER[sessid]
# # call session hook with the data
# self.factory.server.sessions.data_in(sessid, text=msg, **loads(data))
# return {}
MsgPortal2Server.responder(amp_msg_portal2server)
def call_remote_MsgPortal2Server(self, sessid, msg, data=""):
"""
Access method called by the Portal and executed on the Portal.
"""
#print "msg portal->server (portal side):", sessid, msg, data
return self.safe_send(MsgPortal2Server, sessid,
msg=to_str(msg) if msg is not None else "",
data=dumps(data))
# try:
# return self.callRemote(MsgPortal2Server,
# sessid=sessid,
# msg=to_str(msg) if msg is not None else "",
# ipart=0,
# nparts=1,
# data=dumps(data)).addErrback(self.errback, "MsgPortal2Server")
# except amp.TooLong:
# # the msg (or data) was too long for AMP to send.
# # We need to send in blocks.
# return self.send_split_msg(sessid, msg, data, MsgPortal2Server)
# Server -> Portal message
def amp_msg_server2portal(self, sessid, ipart, nparts, msg, data):
"""
Relays message to Portal. This method is executed on the Portal.
"""
#print "msg server->portal (portal side):", sessid, msg
ret = self.safe_recv(MsgServer2Portal, sessid,
ipart, nparts, text=msg, data=data)
if ret is not None:
self.factory.portal.sessions.data_out(sessid,
text=ret["text"],
**loads(ret["data"]))
return {}
# global MSGBUFFER
# if nparts > 1:
# # a multipart message
# MSGBUFFER[sessid].append((ipart, msg, data))
# if len(MSGBUFFER[sessid]) != nparts:
# # we don't have all parts yet. Wait.
# return {}
# else:
# # we have all parts. Put it all together in the right order.
# msg = "".join(t[1] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
# data = "".join(t[2] for t in sorted(MSGBUFFER[sessid], key=lambda o: o[0]))
# del MSGBUFFER[sessid]
# # call session hook with the data
# self.factory.portal.sessions.data_out(sessid, text=msg, **loads(data))
# return {}
MsgServer2Portal.responder(amp_msg_server2portal)
def call_remote_MsgServer2Portal(self, sessid, msg, data=""):
"""
Access method called by the Server and executed on the Server.
"""
#print "msg server->portal (server side):", sessid, msg, data
return self.safe_send(MsgServer2Portal, sessid,
msg=to_str(msg) if msg is not None else "",
data=dumps(data))
# try:
# return self.callRemote(MsgServer2Portal,
# sessid=sessid,
# msg=to_str(msg) if msg is not None else "",
# ipart=0,
# nparts=1,
# data=dumps(data)).addErrback(self.errback, "MsgServer2Portal")
# except amp.TooLong:
# # the msg (or data) was too long for AMP to send.
# # We need to send in blocks.
# return self.send_split_msg(sessid, msg, data, MsgServer2Portal)
# Server administration from the Portal side
def amp_server_admin(self, sessid, ipart, nparts, operation, data):
"""
This allows the portal to perform admin
operations on the server. This is executed on the Server.
"""
ret = self.safe_recv(ServerAdmin, sessid, ipart, nparts,
operation=operation, data=data)
if ret is not None:
data = loads(ret["data"])
operation = ret["operation"]
server_sessionhandler = self.factory.server.sessions
#print "serveradmin (server side):", sessid, ord(operation), data
if operation == PCONN: # portal_session_connect
# create a new session and sync it
server_sessionhandler.portal_connect(data)
elif operation == PDISCONN: # portal_session_disconnect
# session closed from portal side
self.factory.server.sessions.portal_disconnect(sessid)
elif operation == PSYNC: # portal_session_sync
# force a resync of sessions when portal reconnects to
# server (e.g. after a server reboot) the data kwarg
# contains a dict {sessid: {arg1:val1,...}}
# representing the attributes to sync for each
# session.
server_sessionhandler.portal_session_sync(data)
else:
raise Exception("operation %(op)s not recognized." % {'op': operation})
return {}
ServerAdmin.responder(amp_server_admin)
def call_remote_ServerAdmin(self, sessid, operation="", data=""):
"""
Access method called by the Portal and Executed on the Portal.
"""
#print "serveradmin (portal side):", sessid, ord(operation), data
data = dumps(data)
return self.safe_send(ServerAdmin, sessid, operation=operation, data=data)
# return self.callRemote(ServerAdmin,
# sessid=sessid,
# operation=operation,
# data=data).addErrback(self.errback, "ServerAdmin")
# Portal administraton from the Server side
def amp_portal_admin(self, sessid, ipart, nparts, operation, data):
"""
This allows the server to perform admin
operations on the portal. This is executed on the Portal.
"""
#print "portaladmin (portal side):", sessid, ord(operation), data
ret = self.safe_recv(PortalAdmin, sessid, ipart, nparts,
operation=operation, data=data)
if ret is not None:
            data = loads(ret["data"])
            operation = ret["operation"]
portal_sessionhandler = self.factory.portal.sessions
if operation == SLOGIN: # server_session_login
# a session has authenticated; sync it.
portal_sessionhandler.server_logged_in(sessid, data)
elif operation == SDISCONN: # server_session_disconnect
# the server is ordering to disconnect the session
portal_sessionhandler.server_disconnect(sessid, reason=data)
elif operation == SDISCONNALL: # server_session_disconnect_all
# server orders all sessions to disconnect
portal_sessionhandler.server_disconnect_all(reason=data)
elif operation == SSHUTD: # server_shutdown
# the server orders the portal to shut down
self.factory.portal.shutdown(restart=False)
elif operation == SSYNC: # server_session_sync
# server wants to save session data to the portal,
# maybe because it's about to shut down.
portal_sessionhandler.server_session_sync(data)
# set a flag in case we are about to shut down soon
self.factory.server_restart_mode = True
else:
raise Exception("operation %(op)s not recognized." % {'op': operation})
return {}
PortalAdmin.responder(amp_portal_admin)
def call_remote_PortalAdmin(self, sessid, operation="", data=""):
"""
Access method called by the server side.
"""
self.safe_send(PortalAdmin, sessid, operation=operation, data=dumps(data))
#print "portaladmin (server side):", sessid, ord(operation), data
# return self.callRemote(PortalAdmin,
# sessid=sessid,
# operation=operation,
# data=dumps(data)).addErrback(self.errback, "PortalAdmin")
# Extra functions
def amp_function_call(self, module, function, args, **kwargs):
"""
This allows Portal- and Server-process to call an arbitrary function
in the other process. It is intended for use by plugin modules.
"""
args = loads(args)
kwargs = loads(kwargs)
# call the function (don't catch tracebacks here)
result = variable_from_module(module, function)(*args, **kwargs)
if isinstance(result, Deferred):
# if result is a deferred, attach handler to properly
# wrap the return value
result.addCallback(lambda r: {"result": dumps(r)})
return result
else:
return {'result': dumps(result)}
FunctionCall.responder(amp_function_call)
def call_remote_FunctionCall(self, modulepath, functionname, *args, **kwargs):
"""
Access method called by either process. This will call an arbitrary
function on the other process (On Portal if calling from Server and
vice versa).
Inputs:
modulepath (str) - python path to module holding function to call
functionname (str) - name of function in given module
*args, **kwargs will be used as arguments/keyword args for the
remote function call
Returns:
A deferred that fires with the return value of the remote
function call
"""
return self.callRemote(FunctionCall,
module=modulepath,
function=functionname,
args=dumps(args),
kwargs=dumps(kwargs)).addCallback(lambda r: loads(r["result"])).addErrback(self.errback, "FunctionCall")
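    # Illustrative usage sketch (hypothetical module, function and callback names,
    # not part of the original module):
    #   d = amp_protocol.call_remote_FunctionCall("game.hooks", "reload_zones", 3)
    #   d.addCallback(handle_result)   # fires with the remote function's return value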
| google-code-export/evennia | src/server/amp.py | Python | bsd-3-clause | 24,783 |
# Atomic covalent radius data
# http://www.periodictable.com/Properties/A/CovalentRadius.an.html
# Updated Jun. 9th, 2016
class Covalent(object):
x = {
"H": 0.37, "He": 0.32, "Li": 1.34, "Be": 0.90, "B": 0.82, "C": 0.77,
"N": 0.75, "O": 0.73, "F": 0.71, "Ne": 0.69, "Na": 1.54, "Mg": 1.30,
"Al": 1.18, "Si": 1.11, "P": 1.06, "S": 1.02, "Cl": 0.99, "Ar": 0.97,
"K": 1.96, "Ca": 1.74, "Sc": 1.44, "Ti": 1.36, "V": 1.25, "Cr": 1.27,
"Mn": 1.39, "Fe": 1.25, "Co": 1.26, "Ni": 1.21, "Cu": 1.38, "Zn": 1.31,
"Ga": 1.26, "Ge": 1.22, "As": 1.19, "Se": 1.16, "Br": 1.14, "Kr": 1.10,
"Rb": 2.11, "Sr": 1.92, "Y": 1.62, "Zr": 1.48, "Nb": 1.37, "Mo": 1.45,
"Tc": 1.56, "Ru": 1.26, "Rh": 1.35, "Pd": 1.31, "Ag": 1.53, "Cd": 1.48,
"In": 1.44, "Sn": 1.41, "Sb": 1.38, "Te": 1.35, "I": 1.33, "Xe": 1.30,
"Cs": 2.25, "Ba": 1.98, "La": 1.69, "Ce": 0.00, "Pr": 0.00, "Nd": 0.00,
"Pm": 0.00, "Sm": 0.00, "Eu": 0.00, "Gd": 0.00, "Tb": 0.00, "Dy": 0.00,
"Ho": 0.00, "Er": 0.00, "Tm": 0.00, "Yb": 0.00, "Lu": 1.60, "Hf": 1.50,
"Ta": 1.38, "W": 1.46, "Re": 1.59, "Os": 1.28, "Ir": 1.37, "Pt": 1.28,
"Au": 1.44, "Hg": 1.49, "Tl": 1.48, "Pb": 1.47, "Bi": 1.46, "Po": 0.00,
"At": 0.00, "Rn": 1.45, "Fr": 0.00, "Ra": 0.00, "Ac": 0.00, "Th": 0.00,
"Pa": 0.00, "U": 0.00, "Np": 0.00, "Pu": 0.00, "Am": 0.00, "Cm": 0.00,
"Bk": 0.00, "Cf": 0.00, "Es": 0.00, "Fm": 0.00, "Md": 0.00, "No": 0.00,
"Lr": 0.00, "Rf": 0.00, "Db": 0.00, "Sg": 0.00, "Bh": 0.00, "Hs": 0.00,
"Mt": 0.00, "Ds": 0.00, "Rg": 0.00, "Uub": 0.00, "Uut": 0.00, "Uuq": 0.00,
"Uup": 0.00, "Uuh": 0.00, "Uus": 0.00, "Uuo": 0.00
} | stczhc/neupy | tests/fitting/coval.py | Python | mit | 1,681 |
"""
Traits constituting sets of types.
"""
from itertools import chain
from .coretypes import (Unit, int8, int16, int32, int64, uint8, uint16, uint32,
uint64, float16, float32, float64, complex64,
complex128, bool_, Decimal, TimeDelta, Option)
__all__ = ['TypeSet', 'matches_typeset', 'signed', 'unsigned', 'integral',
'floating', 'complexes', 'boolean', 'numeric', 'scalar',
'maxtype']
class TypeSet(Unit):
"""
    Create a new set of types. The optional keyword argument 'name' registers
    the typeset for use in datashape type strings.
"""
__slots__ = '_order', 'name'
def __init__(self, *args, **kwds):
self._order = args
self.name = kwds.get('name')
if self.name:
register_typeset(self.name, self)
@property
def _set(self):
return set(self._order)
@property
def types(self):
return self._order
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.name == other.name and self.types == other.types)
def __hash__(self):
return hash((self.name, self.types))
def __contains__(self, val):
return val in self._set
def __repr__(self):
if self.name:
return '{%s}' % (self.name,)
return "%s(%s, name=%s)" % (self.__class__.__name__, self._set,
self.name)
def __or__(self, other):
return TypeSet(*chain(self, other))
def __iter__(self):
return iter(self._order)
def __len__(self):
return len(self._set)
def matches_typeset(types, signature):
"""Match argument types to the parameter types of a signature
>>> matches_typeset(int32, integral)
True
>>> matches_typeset(float32, integral)
False
>>> matches_typeset(integral, real)
True
"""
if types in signature:
return True
match = True
for a, b in zip(types, signature):
check = isinstance(b, TypeSet)
if check and (a not in b) or (not check and a != b):
match = False
break
return match
class TypesetRegistry(object):
def __init__(self):
self.registry = {}
self.lookup = self.registry.get
def register_typeset(self, name, typeset):
if name in self.registry:
raise TypeError("TypeSet %s already defined with types %s" %
(name, self.registry[name].types))
self.registry[name] = typeset
return typeset
def __getitem__(self, key):
value = self.lookup(key)
if value is None:
raise KeyError(key)
return value
registry = TypesetRegistry()
register_typeset = registry.register_typeset
lookup = registry.lookup
#------------------------------------------------------------------------
# Default Type Sets
#------------------------------------------------------------------------
signed = TypeSet(int8, int16, int32, int64, name='signed')
unsigned = TypeSet(uint8, uint16, uint32, uint64, name='unsigned')
integral = TypeSet(*[x for t in zip(signed, unsigned) for x in t],
name='integral')
floating = TypeSet(float32, float64, name='floating')
complexes = TypeSet(complex64, complex128, name='complexes')
boolean = TypeSet(bool_, name='boolean')
real = TypeSet(*integral | floating, name='real')
numeric = TypeSet(*integral | floating | complexes, name='numeric')
scalar = TypeSet(*boolean | numeric, name='scalar')
supertype_map = {
int8: signed,
int16: signed,
int32: signed,
int64: signed,
uint8: unsigned,
uint16: unsigned,
uint32: unsigned,
uint64: unsigned,
float16: floating,
float32: floating,
float64: floating,
complex64: complexes,
complex128: complexes,
bool_: boolean
}
def supertype(measure):
"""Get the super type of a concrete numeric type
Examples
--------
>>> supertype(int8)
{signed}
>>> supertype(float32)
{floating}
>>> supertype(complex128)
{complexes}
>>> supertype(bool_)
{boolean}
>>> supertype(Option(bool_))
{boolean}
"""
if isinstance(measure, Option):
measure = measure.ty
assert matches_typeset(measure, scalar), 'measure must be numeric'
return supertype_map[measure]
def maxtype(measure):
"""Get the maximum width for a particular numeric type
Examples
--------
>>> maxtype(int8)
ctype("int64")
>>> maxtype(Option(float64))
Option(ty=ctype("float64"))
>>> maxtype(bool_)
ctype("bool")
>>> maxtype(Decimal(11, 2))
Decimal(precision=11, scale=2)
>>> maxtype(Option(Decimal(11, 2)))
Option(ty=Decimal(precision=11, scale=2))
>>> maxtype(TimeDelta(unit='ms'))
TimeDelta(unit='ms')
>>> maxtype(Option(TimeDelta(unit='ms')))
Option(ty=TimeDelta(unit='ms'))
"""
measure = measure.measure
isoption = isinstance(measure, Option)
if isoption:
measure = measure.ty
if (not matches_typeset(measure, scalar) and
not isinstance(measure, (Decimal, TimeDelta))):
raise TypeError('measure must be numeric')
if measure == bool_:
result = bool_
elif isinstance(measure, (Decimal, TimeDelta)):
result = measure
else:
result = max(supertype(measure).types, key=lambda x: x.itemsize)
return Option(result) if isoption else result
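
# Illustrative usage sketch (not part of the original module), mirroring the
# doctests above; runs only when this file is executed directly.
if __name__ == "__main__":
    assert int32 in signed
    assert matches_typeset(float64, real)
    custom = TypeSet(int8, float32)  # unnamed set, so it is not registered
    assert int8 in custom and int64 not in custom
    print(repr(maxtype(int8)), repr(maxtype(float32)))  # widest signed/floating types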
| ContinuumIO/datashape | datashape/typesets.py | Python | bsd-2-clause | 5,475 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN Cells and additional RNN operations.
See @{$python/contrib.rnn} guide.
@@RNNCell
@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell
@@LayerNormBasicLSTMCell
@@LSTMStateTuple
@@MultiRNNCell
@@LSTMBlockWrapper
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
@@DeviceWrapper
@@ResidualWrapper
@@LSTMBlockCell
@@GRUBlockCell
@@FusedRNNCell
@@FusedRNNCellAdaptor
@@TimeReversedFusedRNN
@@LSTMBlockFusedCell
@@CoupledInputForgetGateLSTMCell
@@TimeFreqLSTMCell
@@GridLSTMCell
@@BidirectionalGridLSTMCell
@@NASCell
@@PhasedLSTMCell
### RNNCell wrappers
@@AttentionCellWrapper
@@CompiledWrapper
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
@@stack_bidirectional_dynamic_rnn
@@stack_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.python.ops.core_rnn import static_bidirectional_rnn
from tensorflow.contrib.rnn.python.ops.core_rnn import static_rnn
from tensorflow.contrib.rnn.python.ops.core_rnn import static_state_saving_rnn
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import BasicLSTMCell
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import BasicRNNCell
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import DeviceWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import DropoutWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import EmbeddingWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import GRUCell
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import InputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import LSTMCell
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import LSTMStateTuple
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import MultiRNNCell
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import ResidualWrapper
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import RNNCell
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import *
from tensorflow.contrib.rnn.python.ops.gru_ops import *
from tensorflow.contrib.rnn.python.ops.lstm_ops import *
from tensorflow.contrib.rnn.python.ops.rnn import *
from tensorflow.contrib.rnn.python.ops.rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, ['core_rnn_cell'])
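
# Illustrative usage sketch (not part of this module): the cells re-exported
# here are used like any other RNNCell; `inputs` below is a placeholder for a
# [batch, time, features] float32 tensor.
#
#     cell = tf.contrib.rnn.BasicLSTMCell(num_units=128)
#     outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)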
| mengxn/tensorflow | tensorflow/contrib/rnn/__init__.py | Python | apache-2.0 | 3,284 |
"""Preference management for cloud."""
from ipaddress import ip_address
from .const import (
DOMAIN, PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE, PREF_ENABLE_REMOTE,
PREF_GOOGLE_SECURE_DEVICES_PIN, PREF_CLOUDHOOKS, PREF_CLOUD_USER,
InvalidTrustedNetworks)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
_UNDEF = object()
class CloudPreferences:
"""Handle cloud preferences."""
def __init__(self, hass):
"""Initialize cloud prefs."""
self._hass = hass
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._prefs = None
async def async_initialize(self):
"""Finish initializing the preferences."""
prefs = await self._store.async_load()
if prefs is None:
prefs = {
PREF_ENABLE_ALEXA: True,
PREF_ENABLE_GOOGLE: True,
PREF_ENABLE_REMOTE: False,
PREF_GOOGLE_SECURE_DEVICES_PIN: None,
PREF_CLOUDHOOKS: {},
PREF_CLOUD_USER: None,
}
self._prefs = prefs
async def async_update(self, *, google_enabled=_UNDEF,
alexa_enabled=_UNDEF, remote_enabled=_UNDEF,
google_secure_devices_pin=_UNDEF, cloudhooks=_UNDEF,
cloud_user=_UNDEF):
"""Update user preferences."""
for key, value in (
(PREF_ENABLE_GOOGLE, google_enabled),
(PREF_ENABLE_ALEXA, alexa_enabled),
(PREF_ENABLE_REMOTE, remote_enabled),
(PREF_GOOGLE_SECURE_DEVICES_PIN, google_secure_devices_pin),
(PREF_CLOUDHOOKS, cloudhooks),
(PREF_CLOUD_USER, cloud_user),
):
if value is not _UNDEF:
self._prefs[key] = value
if remote_enabled is True and self._has_local_trusted_network:
raise InvalidTrustedNetworks
await self._store.async_save(self._prefs)
def as_dict(self):
"""Return dictionary version."""
return self._prefs
@property
def remote_enabled(self):
"""Return if remote is enabled on start."""
enabled = self._prefs.get(PREF_ENABLE_REMOTE, False)
if not enabled:
return False
if self._has_local_trusted_network:
return False
return True
@property
def alexa_enabled(self):
"""Return if Alexa is enabled."""
return self._prefs[PREF_ENABLE_ALEXA]
@property
def google_enabled(self):
"""Return if Google is enabled."""
return self._prefs[PREF_ENABLE_GOOGLE]
@property
def google_secure_devices_pin(self):
"""Return if Google is allowed to unlock locks."""
return self._prefs.get(PREF_GOOGLE_SECURE_DEVICES_PIN)
@property
def cloudhooks(self):
"""Return the published cloud webhooks."""
return self._prefs.get(PREF_CLOUDHOOKS, {})
@property
def cloud_user(self) -> str:
"""Return ID from Home Assistant Cloud system user."""
return self._prefs.get(PREF_CLOUD_USER)
@property
def _has_local_trusted_network(self) -> bool:
"""Return if we allow localhost to bypass auth."""
local4 = ip_address('127.0.0.1')
local6 = ip_address('::1')
for prv in self._hass.auth.auth_providers:
if prv.type != 'trusted_networks':
continue
for network in prv.trusted_networks:
if local4 in network or local6 in network:
return True
return False
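
# Illustrative usage sketch (not part of the original module): inside async
# setup code that already has a HomeAssistant instance `hass`, the preferences
# are typically used like this (names below are only for illustration):
#
#     prefs = CloudPreferences(hass)
#     await prefs.async_initialize()
#     await prefs.async_update(remote_enabled=True)  # may raise InvalidTrustedNetworks
#     if prefs.google_enabled:
#         ...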
| MartinHjelmare/home-assistant | homeassistant/components/cloud/prefs.py | Python | apache-2.0 | 3,606 |
# yapf: disable
import ray
# __doc_import_begin__
from ray import serve
from io import BytesIO
from PIL import Image
import requests
import torch
from torchvision import transforms
from torchvision.models import resnet18
# __doc_import_end__
# yapf: enable
# __doc_define_servable_begin__
class ImageModel:
def __init__(self):
self.model = resnet18(pretrained=True).eval()
self.preprocessor = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Lambda(lambda t: t[:3, ...]), # remove alpha channel
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def __call__(self, flask_request):
image_payload_bytes = flask_request.data
pil_image = Image.open(BytesIO(image_payload_bytes))
print("[1/3] Parsed image data: {}".format(pil_image))
pil_images = [pil_image] # Our current batch size is one
input_tensor = torch.cat(
[self.preprocessor(i).unsqueeze(0) for i in pil_images])
print("[2/3] Images transformed, tensor shape {}".format(
input_tensor.shape))
with torch.no_grad():
output_tensor = self.model(input_tensor)
print("[3/3] Inference done!")
return {"class_index": int(torch.argmax(output_tensor[0]))}
# __doc_define_servable_end__
ray.init(num_cpus=8)
# __doc_deploy_begin__
client = serve.start()
client.create_backend("resnet18:v0", ImageModel)
client.create_endpoint(
"predictor",
backend="resnet18:v0",
route="/image_predict",
methods=["POST"])
# __doc_deploy_end__
# __doc_query_begin__
ray_logo_bytes = requests.get(
"https://github.com/ray-project/ray/raw/"
"master/doc/source/images/ray_header_logo.png").content
resp = requests.post(
"http://localhost:8000/image_predict", data=ray_logo_bytes)
print(resp.json())
# Output
# {'class_index': 463}
# __doc_query_end__
| richardliaw/ray | python/ray/serve/examples/doc/tutorial_pytorch.py | Python | apache-2.0 | 2,022 |
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'inline': 'module://ipykernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
    but the function continues processing further figures.
Parameters
----------
    fig_nums : tuple
        A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
    This is just an easy-to-remember convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = {
"format":fmt,
"facecolor":fig.get_facecolor(),
"edgecolor":fig.get_edgecolor(),
"dpi":dpi,
"bbox_inches":bbox_inches,
}
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = {"width": w//2, "height":h//2}
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, str):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline','agg').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
if gui == 'agg':
gui = None
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
    # If a gui has already been selected, only that gui and 'inline' are
    # allowed as choices here, so fall back to the earlier selection.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
    # Importing numpy as np and pyplot as plt is a convention we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = {}
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which the user may
    # have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
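
# Illustrative usage sketch (not part of the original module): rendering a
# figure to PNG bytes with print_figure(), assuming matplotlib is installed.
# The Agg backend is forced so no GUI is required; runs only when this file
# is executed directly.
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    png_bytes = print_figure(fig, fmt="png")
    w, h = _pngxy(png_bytes)
    print("PNG payload: %d bytes, %dx%d px" % (len(png_bytes), w, h))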
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/pylabtools.py | Python | bsd-2-clause | 14,139 |
"""
Django admin page for CourseOverviews, the basic metadata about a course that
is used in user dashboard queries and other places where you need info like
name, and start dates, but don't actually need to crawl into course content.
"""
from __future__ import absolute_import
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from .models import CourseOverview, CourseOverviewImageConfig, CourseOverviewImageSet
class CourseOverviewAdmin(admin.ModelAdmin):
"""
Simple, read-only list/search view of Course Overviews.
"""
list_display = [
'id',
'display_name',
'version',
'enrollment_start',
'enrollment_end',
'created',
'modified',
]
search_fields = ['id', 'display_name']
class CourseOverviewImageConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for CourseOverview Image thumbnails.
By default this is disabled. If you change the dimensions of the images with
a new config after thumbnails have already been generated, you need to clear
the entries in CourseOverviewImageSet manually for new entries to be
created.
"""
list_display = [
'change_date',
'changed_by',
'enabled',
'large_width',
'large_height',
'small_width',
'small_height'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
class CourseOverviewImageSetAdmin(admin.ModelAdmin):
"""
Thumbnail images associated with CourseOverviews. This should be used for
    debugging purposes only -- i.e., don't edit these values.
"""
list_display = [
'course_overview',
'small_url',
'large_url',
]
search_fields = ['course_overview__id']
readonly_fields = ['course_overview_id']
fields = ('course_overview_id', 'small_url', 'large_url')
admin.site.register(CourseOverview, CourseOverviewAdmin)
admin.site.register(CourseOverviewImageConfig, CourseOverviewImageConfigAdmin)
admin.site.register(CourseOverviewImageSet, CourseOverviewImageSetAdmin)
| jolyonb/edx-platform | openedx/core/djangoapps/content/course_overviews/admin.py | Python | agpl-3.0 | 2,355 |
from typing import Dict, Optional, Text
import ujson
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.models import Message
class GithubV1HookTests(WebhookTestCase):
STREAM_NAME = None # type: Optional[Text]
URL_TEMPLATE = u"/api/v1/external/github"
FIXTURE_DIR_NAME = 'github'
SEND_STREAM = False
BRANCHES = None # type: Optional[Text]
push_content = u"""zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 3 commits to branch master.
* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))
* Baz needs to be longer ([06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72))
* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))"""
def test_spam_branch_is_ignored(self) -> None:
self.SEND_STREAM = True
self.STREAM_NAME = 'commits'
self.BRANCHES = 'dev,staging'
data = self.get_body('push')
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe(self.test_user, self.STREAM_NAME)
prior_count = Message.objects.count()
result = self.client_post(self.URL_TEMPLATE, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def get_body(self, fixture_name: Text) -> Dict[str, Text]:
api_key = self.test_user.api_key
data = ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, 'v1_' + fixture_name))
data.update({'email': self.TEST_USER_EMAIL,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if self.SEND_STREAM:
data['stream'] = self.STREAM_NAME
if self.BRANCHES is not None:
data['branches'] = self.BRANCHES
return data
def basic_test(self, fixture_name: Text, stream_name: Text,
expected_subject: Text, expected_content: Text,
send_stream: bool=False, branches: Optional[Text]=None) -> None:
self.STREAM_NAME = stream_name
self.SEND_STREAM = send_stream
self.BRANCHES = branches
self.send_and_test_stream_message(fixture_name, expected_subject, expected_content, content_type=None)
def test_user_specified_branches(self) -> None:
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self) -> None:
"""Around May 2013 the github webhook started to specify the stream.
Before then, the stream was hard coded to "commits"."""
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True)
def test_legacy_hook(self) -> None:
self.basic_test('push', 'commits', 'zulip-test / master', self.push_content)
def test_push_multiple_commits(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 50 commits to branch master.\n\n{}[and {} more commit(s)]".format(
commit_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.basic_test('push_commits_more_than_limit', 'commits', 'zulip-test / master', expected_subject)
def test_issues_opened(self) -> None:
self.basic_test('issues_opened', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin opened [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self) -> None:
self.basic_test('issue_comment', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self) -> None:
self.basic_test('issues_closed', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin closed [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self) -> None:
self.basic_test('pull_request_opened', 'commits',
"zulip-test / PR #7 Counting is hard.",
"lfaraone opened [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)(assigned to lfaraone)\nfrom `patch-2` to `master`\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self) -> None:
self.basic_test('pull_request_closed', 'commits',
"zulip-test / PR #7 Counting is hard.",
"zbenjamin closed [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self) -> None:
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test / PR #13 Even more cowbell.",
"zbenjamin synchronized [PR #13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self) -> None:
self.basic_test('pull_request_comment', 'commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self) -> None:
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self) -> None:
self.basic_test('commit_comment', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self) -> None:
self.basic_test('commit_comment_line', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class GithubV2HookTests(WebhookTestCase):
STREAM_NAME = None # type: Optional[Text]
URL_TEMPLATE = u"/api/v1/external/github"
FIXTURE_DIR_NAME = 'github'
SEND_STREAM = False
BRANCHES = None # type: Optional[Text]
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 3 commits to branch master.
* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))
* Baz needs to be longer ([06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72))
* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))"""
def test_spam_branch_is_ignored(self) -> None:
self.SEND_STREAM = True
self.STREAM_NAME = 'commits'
self.BRANCHES = 'dev,staging'
data = self.get_body('push')
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe(self.test_user, self.STREAM_NAME)
prior_count = Message.objects.count()
result = self.client_post(self.URL_TEMPLATE, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def get_body(self, fixture_name: Text) -> Dict[str, Text]:
api_key = self.test_user.api_key
data = ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, 'v2_' + fixture_name))
data.update({'email': self.TEST_USER_EMAIL,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if self.SEND_STREAM:
data['stream'] = self.STREAM_NAME
if self.BRANCHES is not None:
data['branches'] = self.BRANCHES
return data
def basic_test(self, fixture_name: Text, stream_name: Text,
expected_subject: Text, expected_content: Text,
send_stream: bool=False, branches: Optional[Text]=None) -> None:
self.STREAM_NAME = stream_name
self.SEND_STREAM = send_stream
self.BRANCHES = branches
self.send_and_test_stream_message(fixture_name, expected_subject, expected_content, content_type=None)
def test_user_specified_branches(self) -> None:
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self) -> None:
"""Around May 2013 the github webhook started to specify the stream.
Before then, the stream was hard coded to "commits"."""
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True)
def test_push_multiple_commits(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 50 commits to branch master.\n\n{}[and {} more commit(s)]".format(
commit_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.basic_test('push_commits_more_than_limit', 'commits', 'zulip-test / master', expected_subject)
def test_push_multiple_committers(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 6 commits to branch master. Commits by tomasz (3), baxthehacker (2) and zbenjamin (1).\n\n{}* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))".format(commit_info * 5)
self.basic_test('push_multiple_committers', 'commits', 'zulip-test / master', expected_subject)
def test_push_multiple_committers_with_others(self) -> None:
commit_info = "* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 10 commits to branch master. Commits by baxthehacker (4), James (3), Tomasz (2) and others (1).\n\n{}* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))".format(commit_info * 9)
self.basic_test('push_multiple_committers_with_others', 'commits', 'zulip-test / master', expected_subject)
def test_legacy_hook(self) -> None:
self.basic_test('push', 'commits', 'zulip-test / master', self.push_content)
def test_issues_opened(self) -> None:
self.basic_test('issues_opened', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin opened [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self) -> None:
self.basic_test('issue_comment', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self) -> None:
self.basic_test('issues_closed', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin closed [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self) -> None:
self.basic_test('pull_request_opened', 'commits',
"zulip-test / PR #7 Counting is hard.",
"lfaraone opened [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)(assigned to lfaraone)\nfrom `patch-2` to `master`\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self) -> None:
self.basic_test('pull_request_closed', 'commits',
"zulip-test / PR #7 Counting is hard.",
"zbenjamin closed [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self) -> None:
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test / PR #13 Even more cowbell.",
"zbenjamin synchronized [PR #13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self) -> None:
self.basic_test('pull_request_comment', 'commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self) -> None:
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self) -> None:
self.basic_test('commit_comment', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self) -> None:
self.basic_test('commit_comment_line', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
| mahim97/zulip | zerver/webhooks/github/tests.py | Python | apache-2.0 | 16,526 |
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from beakerx_base import BeakerxBox, BeakerxText, getValue, BeakerxPassword, BeakerxTextArea, SelectMultipleWithRows, \
SelectMultipleSingle, DatePicker, BeakerxComboBox, BeakerxCheckbox, BeakerxHBox, BeakerxVBox, BeakerxCheckboxGroup, \
BeakerxLabel, RadioButtons, EasyFormComponent, BeakerxButton
from ipykernel.comm import Comm
from traitlets import Bool, Unicode
class EasyForm(BeakerxBox):
_view_name = Unicode('EasyFormView').tag(sync=True)
_model_name = Unicode('EasyFormModel').tag(sync=True)
_view_module = Unicode('beakerx').tag(sync=True)
_model_module = Unicode('beakerx').tag(sync=True)
easyFormName = Unicode(default_value='Form default').tag(sync=True)
test = ""
HORIZONTAL = 1
VERTICAL = 2
def __init__(self, *args, **kwargs):
super(EasyForm, self).__init__(**kwargs)
self.easyFormName = getValue(kwargs, 'title', "")
if self.easyFormName == "" and len(args) > 0:
self.easyFormName = args[0]
def _handle_msg(self, msg):
print(msg)
def addTextField(self, *args, **kwargs):
text = BeakerxText(description=self.getDescription(args, kwargs))
text.size = getValue(kwargs, 'width', -1)
self.children += (text,)
self.components[text.description] = text
return text
def addPasswordField(self, *args, **kwargs):
password = BeakerxPassword(description=self.getDescription(args, kwargs))
password.size = getValue(kwargs, 'width', -1)
self.children += (password,)
self.components[password.description] = password
return password
def addTextArea(self, *args, **kwargs):
textarea = BeakerxTextArea(
description=self.getDescription(args, kwargs))
textarea.cols = getValue(kwargs, 'width', -1)
textarea.rows = getValue(kwargs, 'height', -1)
textarea.value = getValue(kwargs, 'value', "")
textarea.placeholder = getValue(kwargs, 'placeholder', "")
self.children += (textarea,)
self.components[textarea.description] = textarea
return textarea
def addButton(self, *args, **kwargs):
button = BeakerxButton(description=self.getDescription(args, kwargs))
button.tag = getValue(kwargs, 'tag', "")
button.on_click(self.buttonCallback)
self.children += (button,)
return button
def buttonCallback(self, *args):
if len(args) > 0:
args[0].actionPerformed()
arguments = dict(target_name='beakerx.tag.run')
comm = Comm(**arguments)
msg = {'runByTag': args[0].tag}
state = {'state': msg}
comm.send(data=state, buffers=[])
def addList(self, *args, **kwargs):
multi_select = getValue(kwargs, 'multi', True)
if multi_select:
list = SelectMultipleWithRows(
description=self.getDescription(args, kwargs))
else:
list = SelectMultipleSingle(
description=self.getDescription(args, kwargs))
list.options = self.getOptions(args, kwargs)
list.size = getValue(kwargs, 'rows', len(list.options))
self.children += (list,)
self.components[list.description] = list
return list
def addDatePicker(self, *args, **kwargs):
data_picker = DatePicker(description=self.getDescription(args, kwargs))
data_picker.value = getValue(kwargs, 'value', '')
self.children += (data_picker,)
self.components[data_picker.description] = data_picker
return data_picker
def addComboBox(self, *args, **kwargs):
dropdown = BeakerxComboBox(description=self.getDescription(args, kwargs))
dropdown.options = self.getOptions(args, kwargs)
dropdown.original_options = self.getOptions(args, kwargs)
dropdown.editable = getValue(kwargs, 'editable', False)
self.children += (dropdown,)
self.components[dropdown.description] = dropdown
return dropdown
def addCheckBox(self, *args, **kwargs):
checkbox = BeakerxCheckbox(description=self.getDescription(args, kwargs))
checkbox.value = getValue(kwargs, 'value', False)
self.children += (checkbox,)
self.components[checkbox.description] = checkbox
return checkbox
def addCheckBoxes(self, *args, **kwargs):
layout = BeakerxHBox()
orientation = getValue(kwargs, 'orientation', EasyForm.VERTICAL)
if orientation == EasyForm.HORIZONTAL:
box = BeakerxHBox()
else:
box = BeakerxVBox()
checkbox = BeakerxCheckboxGroup()
for checkBoxItem in self.getOptions(args, kwargs):
children = BeakerxCheckbox(description=checkBoxItem)
checkbox.addChildren(children)
box.children += (children,)
layout.children += (BeakerxLabel(value=self.getDescription(args, kwargs)), box,)
self.children += (layout,)
self.components[self.getDescription(args, kwargs)] = checkbox
return layout
def addRadioButtons(self, *args, **kwargs):
orientation = getValue(kwargs, 'orientation', EasyForm.VERTICAL)
radio_buttons = RadioButtons(options=self.getOptions(args, kwargs),
description=self.getDescription(args,
kwargs))
radio_buttons.index = None
if orientation == EasyForm.VERTICAL:
self.children += (radio_buttons,)
else:
box = BeakerxHBox()
box.children += (radio_buttons,)
self.children += (box,)
self.components[radio_buttons.description] = radio_buttons
return radio_buttons
def addWidget(self, name, widget):
EasyFormComponent.add_interface_to(widget)
self.children += (widget,)
self.components[name] = widget
return widget
def __iter__(self):
return iter(self.components)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def get(self, key):
if key in self.components:
return self.components[key].value
else:
return ""
def put(self, key, value):
self.components[key].set_value(value)
@staticmethod
def getDescription(args, kwargs):
if len(args) > 0:
return args[0]
else:
return getValue(kwargs, 'description', "")
@staticmethod
def getOptions(args, kwargs):
if len(args) > 1:
return args[1][:]
else:
return getValue(kwargs, 'options', [])
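
# Illustrative usage sketch (not part of the original module): in a notebook
# running the BeakerX kernel extensions, a form is typically built and read
# back like this (field names below are only for illustration):
#
#     form = EasyForm("Contact details")
#     form.addTextField("Name", width=30)
#     form.addComboBox("Team", ["red", "blue"], editable=False)
#     form.addButton("Save", tag="save_cells")
#     form                                 # display the widget
#     ...
#     print(form["Name"], form["Team"])    # read values back via __getitem__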
| twosigma/beaker-notebook | beakerx/beakerx/easyform/easyform.py | Python | apache-2.0 | 7,322 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import accounting.apps.books.utils
from django.conf import settings
import datetime
import accounting.libs.checks
import django.core.validators
from decimal import Decimal
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('display_name', models.CharField(help_text='Name that you communicate', max_length=150)),
('legal_name', models.CharField(help_text='Official name to appear on your reports, sales invoices and bills', max_length=150)),
('members', models.ManyToManyField(null=True, blank=True, related_name='organizations', to=settings.AUTH_USER_MODEL)),
('owner', models.ForeignKey(related_name='owned_organizations', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| dulaccc/django-accounting | accounting/apps/books/migrations/0001_initial.py | Python | mit | 1,237 |
__version__ = version = '1.2.3'
| editeodoro/Bbarolo | pyBBarolo/_version.py | Python | gpl-2.0 | 32 |
# (c) Fastly, inc 2016
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
callback: selective
callback_type: stdout
requirements:
- set as main display callback
short_description: only print certain tasks
version_added: "2.4"
description:
- This callback only prints tasks that have been tagged with `print_action` or that have failed.
      This allows operators to focus only on the tasks that provide value.
    - Tasks that are not printed are marked with a '.'.
- If you increase verbosity all tasks are printed.
options:
nocolor:
default: False
description: This setting allows suppressing colorizing output
env:
- name: ANSIBLE_NOCOLOR
- name: ANSIBLE_SELECTIVE_DONT_COLORIZE
ini:
- section: defaults
key: nocolor
type: boolean
"""
EXAMPLES = """
- debug: msg="This will not be printed"
- debug: msg="But this will"
tags: [print_action]
"""
import difflib
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.module_utils._text import to_text
from ansible.utils.color import codeCodes
DONT_COLORIZE = False
COLORS = {
'normal': '\033[0m',
'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
'bold': '\033[1m',
'not_so_bold': '\033[1m\033[34m',
'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
'endc': '\033[0m',
'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
}
def dict_diff(prv, nxt):
"""Return a dict of keys that differ with another config object."""
    keys = set(prv.keys()) | set(nxt.keys())  # union works on both Python 2 and 3
result = {}
for k in keys:
if prv.get(k) != nxt.get(k):
result[k] = (prv.get(k), nxt.get(k))
return result
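
# Illustrative example (not part of the original plugin): dict_diff() pairs the
# old and new value for every key whose value changed, e.g.
#     dict_diff({"a": 1, "b": 2}, {"a": 1, "b": 3, "c": 4})
#     -> {"b": (2, 3), "c": (None, 4)}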
def colorize(msg, color):
"""Given a string add necessary codes to format the string."""
if DONT_COLORIZE:
return msg
else:
return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
class CallbackModule(CallbackBase):
"""selective.py callback plugin."""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'selective'
def __init__(self, display=None):
"""selective.py callback plugin."""
super(CallbackModule, self).__init__(display)
self.last_skipped = False
self.last_task_name = None
self.printed_last_task = False
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
global DONT_COLORIZE
DONT_COLORIZE = self.get_option('nocolor')
def _print_task(self, task_name=None):
if task_name is None:
task_name = self.last_task_name
if not self.printed_last_task:
self.printed_last_task = True
line_length = 120
if self.last_skipped:
print()
msg = colorize("# {0} {1}".format(task_name,
'*' * (line_length - len(task_name))), 'bold')
print(msg)
def _indent_text(self, text, indent_level):
lines = text.splitlines()
result_lines = []
for l in lines:
result_lines.append("{0}{1}".format(' ' * indent_level, l))
return '\n'.join(result_lines)
def _print_diff(self, diff, indent_level):
if isinstance(diff, dict):
try:
diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(),
diff['after'].splitlines(),
fromfile=diff.get('before_header',
'new_file'),
tofile=diff['after_header']))
except AttributeError:
diff = dict_diff(diff['before'], diff['after'])
if diff:
diff = colorize(str(diff), 'changed')
print(self._indent_text(diff, indent_level + 4))
def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr):
if is_host:
indent_level = 0
name = colorize(host_or_item.name, 'not_so_bold')
else:
indent_level = 4
if isinstance(host_or_item, dict):
if 'key' in host_or_item.keys():
host_or_item = host_or_item['key']
name = colorize(to_text(host_or_item), 'bold')
if error:
color = 'failed'
change_string = colorize('FAILED!!!', color)
else:
color = 'changed' if changed else 'ok'
change_string = colorize("changed={0}".format(changed), color)
msg = colorize(msg, color)
line_length = 120
spaces = ' ' * (40 - len(name) - indent_level)
line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
if len(msg) < 50:
line += ' -- {0}'.format(msg)
print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
else:
print("{0} {1}".format(line, '-' * (line_length - len(line))))
print(self._indent_text(msg, indent_level + 4))
if diff:
self._print_diff(diff, indent_level)
if stdout:
stdout = colorize(stdout, 'failed')
print(self._indent_text(stdout, indent_level + 4))
if stderr:
stderr = colorize(stderr, 'failed')
print(self._indent_text(stderr, indent_level + 4))
def v2_playbook_on_play_start(self, play):
"""Run on start of the play."""
pass
def v2_playbook_on_task_start(self, task, **kwargs):
"""Run when a task starts."""
self.last_task_name = task.get_name()
self.printed_last_task = False
def _print_task_result(self, result, error=False, **kwargs):
"""Run when a task finishes correctly."""
if 'print_action' in result._task.tags or error or self._display.verbosity > 1:
self._print_task()
self.last_skipped = False
msg = to_text(result._result.get('msg', '')) or\
to_text(result._result.get('reason', ''))
stderr = [result._result.get('exception', None),
result._result.get('module_stderr', None)]
stderr = "\n".join([e for e in stderr if e]).strip()
self._print_host_or_item(result._host,
result._result.get('changed', False),
msg,
result._result.get('diff', None),
is_host=True,
error=error,
stdout=result._result.get('module_stdout', None),
stderr=stderr.strip(),
)
if 'results' in result._result:
for r in result._result['results']:
failed = 'failed' in r
stderr = [r.get('exception', None), r.get('module_stderr', None)]
stderr = "\n".join([e for e in stderr if e]).strip()
self._print_host_or_item(r['item'],
r.get('changed', False),
to_text(r.get('msg', '')),
r.get('diff', None),
is_host=False,
error=failed,
stdout=r.get('module_stdout', None),
stderr=stderr.strip(),
)
else:
self.last_skipped = True
print('.', end="")
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics."""
print()
self.printed_last_task = False
self._print_task('STATS')
hosts = sorted(stats.processed.keys())
for host in hosts:
s = stats.summarize(host)
if s['failures'] or s['unreachable']:
color = 'failed'
elif s['changed']:
color = 'changed'
else:
color = 'ok'
msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
print(colorize(msg, color))
def v2_runner_on_skipped(self, result, **kwargs):
"""Run when a task is skipped."""
if self._display.verbosity > 1:
self._print_task()
self.last_skipped = False
line_length = 120
spaces = ' ' * (31 - len(result._host.name) - 4)
line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
spaces,
colorize("skipped", 'skipped'),)
reason = result._result.get('skipped_reason', '') or \
result._result.get('skip_reason', '')
if len(reason) < 50:
line += ' -- {0}'.format(reason)
print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
else:
print("{0} {1}".format(line, '-' * (line_length - len(line))))
print(self._indent_text(reason, 8))
print(reason)
def v2_runner_on_ok(self, result, **kwargs):
self._print_task_result(result, error=False, **kwargs)
def v2_runner_on_failed(self, result, **kwargs):
self._print_task_result(result, error=True, **kwargs)
def v2_runner_on_unreachable(self, result, **kwargs):
self._print_task_result(result, error=True, **kwargs)
v2_playbook_on_handler_task_start = v2_playbook_on_task_start
| alxgu/ansible | lib/ansible/plugins/callback/selective.py | Python | gpl-3.0 | 10,438 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote process host (client)."""
import os
import subprocess
from clusterfuzz._internal.protos import untrusted_runner_pb2
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import process_handler
from . import environment
from . import host
def process_result_from_proto(process_result_proto):
"""Convert ProcessResult proto to new_process.ProcessResult."""
return new_process.ProcessResult(
process_result_proto.command, process_result_proto.return_code,
process_result_proto.output, process_result_proto.time_executed,
process_result_proto.timed_out)
def run_process(cmdline,
current_working_directory=None,
timeout=process_handler.DEFAULT_TEST_TIMEOUT,
need_shell=False,
gestures=None,
env_copy=None,
testcase_run=True,
ignore_children=True):
"""Remote version of process_handler.run_process."""
request = untrusted_runner_pb2.RunProcessRequest(
cmdline=cmdline,
current_working_directory=current_working_directory,
timeout=timeout,
need_shell=need_shell,
testcase_run=testcase_run,
ignore_children=ignore_children)
if gestures:
request.gestures.extend(gestures)
env = {}
# run_process's local behaviour is to apply the passed |env_copy| on top of
# the current environment instead of replacing it completely (like with
# subprocess).
environment.set_environment_vars(env, os.environ)
environment.set_environment_vars(env, env_copy)
request.env_copy.update(env)
response = host.stub().RunProcess(request)
return response.return_code, response.execution_time, response.output
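# Illustrative usage sketch (not part of the original module): the command,
# working directory and timeout below are placeholders, and host.stub() must
# already be connected to an untrusted-runner worker before run_process() is
# called.
#
#   return_code, execution_time, output = run_process(
#       'ls -la', current_working_directory='/tmp', timeout=30)
#
# Note that |env_copy| is layered on top of os.environ (see above) rather than
# replacing the environment wholesale.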
class RemoteProcessRunner(new_process.ProcessRunner):
"""Remote child process."""
def __init__(self, executable_path, default_args=None):
super(RemoteProcessRunner, self).__init__(
executable_path, default_args=default_args)
def run(self, **kwargs): # pylint: disable=arguments-differ
# TODO(ochang): This can be implemented, but isn't necessary yet.
raise NotImplementedError
def run_and_wait(self,
additional_args=None,
timeout=None,
terminate_before_kill=False,
terminate_wait_time=None,
input_data=None,
max_stdout_len=None,
extra_env=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**popen_args):
# pylint: disable=unused-argument
# pylint: disable=arguments-differ
"""Remote version of new_process.ProcessRunner.run_and_wait."""
assert stdout == subprocess.PIPE
assert stderr == subprocess.STDOUT
request = untrusted_runner_pb2.RunAndWaitRequest(
executable_path=self.executable_path,
timeout=timeout,
terminate_before_kill=terminate_before_kill,
terminate_wait_time=terminate_wait_time,
input_data=input_data,
max_stdout_len=max_stdout_len)
request.default_args.extend(self.default_args)
request.additional_args.extend(additional_args)
if 'bufsize' in popen_args:
request.popen_args.bufsize = popen_args['bufsize']
if 'executable' in popen_args:
request.popen_args.executable = popen_args['executable']
if 'shell' in popen_args:
request.popen_args.shell = popen_args['shell']
if 'cwd' in popen_args:
request.popen_args.cwd = popen_args['cwd']
passed_env = popen_args.get('env', None)
if passed_env is not None:
request.popen_args.env_is_set = True
# Filter the passed environment to prevent leaking sensitive environment
# variables if the caller passes e.g. os.environ.copy().
environment.set_environment_vars(request.popen_args.env, passed_env)
response = host.stub().RunAndWait(request)
return process_result_from_proto(response.result)
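# Illustrative sketch (assumption, not part of the original module): a
# RemoteProcessRunner is used much like new_process.ProcessRunner; the
# executable path and arguments below are placeholders.
#
#   runner = RemoteProcessRunner('/path/to/fuzz_target', default_args=['-runs=100'])
#   result = runner.run_and_wait(additional_args=['./corpus'], timeout=3600)
#   print(result.return_code, result.output)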
def terminate_stale_application_instances():
"""Terminate stale application instances."""
host.stub().TerminateStaleApplicationInstances(
untrusted_runner_pb2.TerminateStaleApplicationInstancesRequest())
| google/clusterfuzz | src/clusterfuzz/_internal/bot/untrusted_runner/remote_process_host.py | Python | apache-2.0 | 4,764 |
"""Module containing class for AWS's Glue Crawler."""
import json
from typing import Any, Dict, Optional, Tuple
from absl import flags
from perfkitbenchmarker import data_discovery_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
class CrawlNotCompletedError(Exception):
"""Used to signal a crawl is still running."""
class CrawlFailedError(Exception):
"""Used to signal a crawl has failed."""
class AwsGlueCrawler(data_discovery_service.BaseDataDiscoveryService):
"""AWS Glue Crawler Resource Class.
Attributes:
db_name: Name of the Glue database that will be provisioned.
crawler_name: Name of the crawler that will be provisioned.
role: Role the crawler will use. Refer to aws_glue_crawler_role flag for
more info.
sample_size: How many files will be crawled in each leaf directory. Refer to
aws_glue_crawler_sample_size flag for more info.
"""
CLOUD = providers.AWS
SERVICE_TYPE = 'glue'
READY = 'READY'
FAILED = 'FAILED'
CRAWL_TIMEOUT = 21600
CRAWL_POLL_INTERVAL = 5
def __init__(self):
super().__init__()
self.db_name = f'pkb-db-{FLAGS.run_uri}'
self.crawler_name = f'pkb-crawler-{FLAGS.run_uri}'
self.role = FLAGS.aws_glue_crawler_role
self.sample_size = FLAGS.aws_glue_crawler_sample_size
def _Create(self) -> None:
# creating database
database_input = {
'Name': self.db_name,
'Description': '\n'.join(
f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
}
cmd = util.AWS_PREFIX + [
'glue',
'create-database',
'--database-input', json.dumps(database_input),
f'--region={self.region}',
]
vm_util.IssueCommand(cmd)
targets = {'S3Targets': [{'Path': self.data_discovery_path}]}
if self.sample_size is not None:
targets['S3Targets'][0]['SampleSize'] = self.sample_size
# creating crawler
cmd = util.AWS_PREFIX + [
'glue',
'create-crawler',
'--name', self.crawler_name,
'--role', self.role,
'--database-name', self.db_name,
'--targets', json.dumps(targets),
'--region', self.region,
'--tags', ','.join(
f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
]
vm_util.IssueCommand(cmd)
def _Exists(self) -> bool:
return self._DbExists() and self._CrawlerExists()
def _IsReady(self, raise_on_crawl_failure=False) -> bool:
stdout, _, _ = self._GetCrawler()
data = json.loads(stdout)
if (data['Crawler'].get('LastCrawl', {}).get('Status') == self.FAILED and
raise_on_crawl_failure):
raise CrawlFailedError(
data['Crawler'].get('LastCrawl', {}).get('ErrorMessage', ''))
return data['Crawler']['State'] == self.READY
def _Delete(self) -> None:
# deleting database
cmd = util.AWS_PREFIX + [
'glue',
'delete-database',
'--name', self.db_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
# deleting crawler
cmd = util.AWS_PREFIX + [
'glue',
'delete-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _IsDeleting(self) -> bool:
crawler_exists = self._CrawlerExists()
db_exists = self._DbExists()
if db_exists is None or crawler_exists is None:
return True
    # Reuse the lookups above rather than issuing two more AWS CLI calls.
    return db_exists or crawler_exists
def DiscoverData(self) -> float:
"""Runs the AWS Glue Crawler. Returns the time elapsed in secs."""
cmd = util.AWS_PREFIX + [
'glue',
'start-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
vm_util.IssueCommand(cmd)
self._WaitUntilCrawlerReady()
cmd = util.AWS_PREFIX + [
'glue',
'get-crawler-metrics',
'--crawler-name-list', self.crawler_name,
'--region', self.region,
]
output, _, _ = vm_util.IssueCommand(cmd)
data = json.loads(output)
assert (isinstance(data['CrawlerMetricsList'], list) and
len(data['CrawlerMetricsList']) == 1)
return data['CrawlerMetricsList'][0]['LastRuntimeSeconds']
def GetMetadata(self) -> Dict[str, Any]:
"""Return a dictionary of the metadata for this service."""
metadata = super().GetMetadata()
metadata.update(
aws_glue_crawler_sample_size=self.sample_size,
aws_glue_db_name=self.db_name,
aws_glue_crawler_name=self.crawler_name,
)
return metadata
@vm_util.Retry(
timeout=CRAWL_TIMEOUT,
poll_interval=CRAWL_POLL_INTERVAL,
fuzz=0,
retryable_exceptions=CrawlNotCompletedError,)
def _WaitUntilCrawlerReady(self):
if not self._IsReady(raise_on_crawl_failure=True):
raise CrawlNotCompletedError(
f'Crawler {self.crawler_name} still running.')
def _DbExists(self) -> Optional[bool]:
"""Whether the database exists or not.
It might return None if the API call failed with an unknown error.
Returns:
A bool or None.
"""
cmd = util.AWS_PREFIX + [
'glue',
'get-database',
'--name', self.db_name,
'--region', self.region,
]
_, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if not retcode:
return True
return False if 'EntityNotFoundException' in stderr else None
def _CrawlerExists(self) -> Optional[bool]:
"""Whether the crawler exists or not.
It might return None if the API call failed with an unknown error.
Returns:
A bool or None.
"""
_, stderr, retcode = self._GetCrawler(raise_on_failure=False)
if not retcode:
return True
return False if 'EntityNotFoundException' in stderr else None
def _GetCrawler(self, raise_on_failure=True) -> Tuple[str, str, int]:
"""Calls the AWS CLI to retrieve a crawler."""
cmd = util.AWS_PREFIX + [
'glue',
'get-crawler',
'--name', self.crawler_name,
'--region', self.region,
]
return vm_util.IssueCommand(cmd, raise_on_failure=raise_on_failure)
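# Illustrative lifecycle sketch (assumption, not part of the original module):
# the benchmark framework normally drives this resource, roughly as follows;
# the S3 path below is a placeholder.
#
#   crawler = AwsGlueCrawler()
#   crawler.data_discovery_path = 's3://my-bucket/dataset'
#   crawler._Create()                        # provision Glue database + crawler
#   runtime_secs = crawler.DiscoverData()    # start the crawl, wait until READY
#   crawler._Delete()                        # clean up both resources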
| GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/providers/aws/aws_glue_crawler.py | Python | apache-2.0 | 6,215 |
from piservices import PiService
class MusicPlayerDaemonService(PiService):
name = 'pimpd'
    apt_get_install = ['mpd', 'mpc', 'alsa-utils']
path_to_music = '/var/lib/mpd/music'
init_script = 'installed'
config_file = '/etc/mpd.conf'
config_file_tpl = 'src/mpd.conf'
#TODO mpd conf
def install(self):
PiService.install(self)
self.sudo('modprobe snd_bcm2835')
self.sudo('amixer cset numid=3 1')
self.sudo('chmod g+w /var/lib/mpd/music/ /var/lib/mpd/playlists/')
self.sudo('chgrp audio /var/lib/mpd/music/ /var/lib/mpd/playlists/')
def deploy(self, restart=True):
config_writer = self.remote.template(self.config_file_tpl)
config_writer.render({
'host': self.config.host,
            'port': self.config.port
})
config_writer.write(self.config_file)
PiService.deploy(self, restart)
self.sudo('mpc update')
def start(self):
self.sudo('/etc/init.d/mpd start')
self.sudo('mpc update')
def restart(self):
self.sudo('/etc/init.d/mpd restart')
self.sudo('mpc update')
def stop(self):
self.sudo('/etc/init.d/mpd stop')
instance = MusicPlayerDaemonService()
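# Illustrative usage sketch (assumption -- the surrounding PiService driver API
# is defined elsewhere in this repository):
#
#   instance.install()   # install mpd/mpc/alsa-utils and fix permissions
#   instance.deploy()    # render mpd.conf from the template and restart mpd
#   instance.restart()   # restart the daemon and trigger a library update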
| creative-workflow/pi-setup | services/pimpd/__init__.py | Python | mit | 1,226 |
'''
Implementation of a Twisted Modbus Server
------------------------------------------
'''
import traceback
from binascii import b2a_hex
from twisted.internet import protocol
from twisted.internet.protocol import ServerFactory
from pymodbus.constants import Defaults
from pymodbus.factory import ServerDecoder
from pymodbus.datastore import ModbusServerContext
from pymodbus.device import ModbusControlBlock
from pymodbus.device import ModbusAccessControl
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.exceptions import NoSuchSlaveException
from pymodbus.transaction import ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.pdu import ModbusExceptions as merror
#---------------------------------------------------------------------------#
# Logging
#---------------------------------------------------------------------------#
import logging
_logger = logging.getLogger(__name__)
#---------------------------------------------------------------------------#
# Modbus TCP Server
#---------------------------------------------------------------------------#
class ModbusTcpProtocol(protocol.Protocol):
''' Implements a modbus server in twisted '''
def connectionMade(self):
''' Callback for when a client connects
        .. note:: since the protocol factory cannot be accessed from the
protocol __init__, the client connection made is essentially
our __init__ method.
'''
_logger.debug("Client Connected [%s]" % self.transport.getHost())
self.framer = self.factory.framer(decoder=self.factory.decoder)
def connectionLost(self, reason):
''' Callback for when a client disconnects
:param reason: The client's reason for disconnecting
'''
_logger.debug("Client Disconnected: %s" % reason)
def dataReceived(self, data):
''' Callback when we receive any data
:param data: The data sent by the client
'''
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(" ".join([hex(ord(x)) for x in data]))
if not self.factory.control.ListenOnly:
self.framer.processIncomingPacket(data, self._execute)
def _execute(self, request):
''' Executes the request and returns the result
:param request: The decoded request message
'''
try:
context = self.factory.store[request.unit_id]
response = request.execute(context)
except NoSuchSlaveException, ex:
_logger.debug("requested slave does not exist: %s; %s", ex, traceback.format_exc() )
if self.factory.ignore_missing_slaves:
return # the client will simply timeout waiting for a response
response = request.doException(merror.GatewayNoResponse)
except Exception, ex:
_logger.debug("Datastore unable to fulfill request: %s" % ex)
response = request.doException(merror.SlaveFailure)
#self.framer.populateResult(response)
response.transaction_id = request.transaction_id
response.unit_id = request.unit_id
self._send(response)
def _send(self, message):
''' Send a request (string) to the network
:param message: The unencoded modbus response
'''
if message.should_respond:
self.factory.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug('send: %s' % b2a_hex(pdu))
return self.transport.write(pdu)
class ModbusServerFactory(ServerFactory):
'''
Builder class for a modbus server
This also holds the server datastore so that it is
persisted between connections
'''
protocol = ModbusTcpProtocol
def __init__(self, store, framer=None, identity=None, **kwargs):
''' Overloaded initializer for the modbus factory
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param store: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.store = store or ModbusServerContext()
self.control = ModbusControlBlock()
self.access = ModbusAccessControl()
self.ignore_missing_slaves = kwargs.get('ignore_missing_slaves', Defaults.IgnoreMissingSlaves)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
#---------------------------------------------------------------------------#
# Modbus UDP Server
#---------------------------------------------------------------------------#
class ModbusUdpProtocol(protocol.DatagramProtocol):
''' Implements a modbus udp server in twisted '''
def __init__(self, store, framer=None, identity=None, **kwargs):
''' Overloaded initializer for the modbus factory
        If the identity structure is not passed in, the ModbusControlBlock
        uses its own empty structure.
        :param store: The ModbusServerContext datastore
        :param framer: The framer strategy to use
        :param identity: An optional identity structure
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
framer = framer or ModbusSocketFramer
self.framer = framer(decoder=ServerDecoder())
self.store = store or ModbusServerContext()
self.control = ModbusControlBlock()
self.access = ModbusAccessControl()
self.ignore_missing_slaves = kwargs.get('ignore_missing_slaves', Defaults.IgnoreMissingSlaves)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
def datagramReceived(self, data, addr):
''' Callback when we receive any data
:param data: The data sent by the client
'''
_logger.debug("Client Connected [%s:%s]" % addr)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(" ".join([hex(ord(x)) for x in data]))
if not self.control.ListenOnly:
continuation = lambda request: self._execute(request, addr)
self.framer.processIncomingPacket(data, continuation)
def _execute(self, request, addr):
''' Executes the request and returns the result
:param request: The decoded request message
'''
try:
context = self.store[request.unit_id]
response = request.execute(context)
except NoSuchSlaveException, ex:
_logger.debug("requested slave does not exist: %s; %s", ex, traceback.format_exc() )
if self.ignore_missing_slaves:
return # the client will simply timeout waiting for a response
response = request.doException(merror.GatewayNoResponse)
except Exception, ex:
_logger.debug("Datastore unable to fulfill request: %s" % ex)
response = request.doException(merror.SlaveFailure)
#self.framer.populateResult(response)
response.transaction_id = request.transaction_id
response.unit_id = request.unit_id
self._send(response, addr)
def _send(self, message, addr):
''' Send a request (string) to the network
:param message: The unencoded modbus response
:param addr: The (host, port) to send the message to
'''
self.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug('send: %s' % b2a_hex(pdu))
return self.transport.write(pdu, addr)
#---------------------------------------------------------------------------#
# Starting Factories
#---------------------------------------------------------------------------#
def StartTcpServer(context, identity=None, address=None, console=False, **kwargs):
''' Helper method to start the Modbus Async TCP server
:param context: The server data context
    :param identity: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = ModbusSocketFramer
factory = ModbusServerFactory(context, framer, identity, **kwargs)
if console:
from pymodbus.internal.ptwisted import InstallManagementConsole
InstallManagementConsole({'factory': factory})
_logger.info("Starting Modbus TCP Server on %s:%s" % address)
reactor.listenTCP(address[1], factory, interface=address[0])
reactor.run()
def StartUdpServer(context, identity=None, address=None, **kwargs):
''' Helper method to start the Modbus Async Udp server
:param context: The server data context
    :param identity: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = ModbusSocketFramer
server = ModbusUdpProtocol(context, framer, identity, **kwargs)
_logger.info("Starting Modbus UDP Server on %s:%s" % address)
reactor.listenUDP(address[1], server, interface=address[0])
reactor.run()
def StartSerialServer(context, identity=None,
framer=ModbusAsciiFramer, **kwargs):
''' Helper method to start the Modbus Async Serial server
:param context: The server data context
    :param identity: The server identity to use (default empty)
:param framer: The framer to use (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param baudrate: The baud rate to use for the serial device
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request to a missing slave
'''
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort
port = kwargs.get('port', '/dev/ttyS0')
baudrate = kwargs.get('baudrate', Defaults.Baudrate)
console = kwargs.get('console', False)
_logger.info("Starting Modbus Serial Server on %s" % port)
factory = ModbusServerFactory(context, framer, identity, **kwargs)
if console:
from pymodbus.internal.ptwisted import InstallManagementConsole
InstallManagementConsole({'factory': factory})
protocol = factory.buildProtocol(None)
SerialPort.getHost = lambda self: port # hack for logging
SerialPort(protocol, port, reactor, baudrate)
reactor.run()
#---------------------------------------------------------------------------#
# Exported symbols
#---------------------------------------------------------------------------#
__all__ = [
"StartTcpServer", "StartUdpServer", "StartSerialServer",
]
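#---------------------------------------------------------------------------#
# Example (illustrative sketch, not part of the original module)
#---------------------------------------------------------------------------#
# A minimal TCP server can be started roughly as follows; the register block
# contents and port are assumptions:
#
#   from pymodbus.datastore import ModbusSequentialDataBlock, ModbusSlaveContext
#   block = ModbusSequentialDataBlock(0, [0] * 100)
#   slave = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
#   context = ModbusServerContext(slaves=slave, single=True)
#   StartTcpServer(context, address=("0.0.0.0", 5020))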
| mjfarmer/scada_py | pymodbus/pymodbus/server/async.py | Python | gpl-3.0 | 11,362 |
'''Integration tests with urllib3'''
# coding=utf-8
import pytest
import pytest_httpbin
import vcr
from assertions import assert_cassette_empty, assert_is_json
urllib3 = pytest.importorskip("urllib3")
@pytest.fixture(scope='module')
def verify_pool_mgr():
return urllib3.PoolManager(
cert_reqs='CERT_REQUIRED', # Force certificate check.
ca_certs=pytest_httpbin.certs.where()
)
@pytest.fixture(scope='module')
def pool_mgr():
return urllib3.PoolManager()
def test_status_code(httpbin_both, tmpdir, verify_pool_mgr):
'''Ensure that we can read the status code'''
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join('atts.yaml'))):
status_code = verify_pool_mgr.request('GET', url).status
with vcr.use_cassette(str(tmpdir.join('atts.yaml'))):
assert status_code == verify_pool_mgr.request('GET', url).status
def test_headers(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure that we can read the headers back'''
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join('headers.yaml'))):
headers = verify_pool_mgr.request('GET', url).headers
with vcr.use_cassette(str(tmpdir.join('headers.yaml'))):
assert headers == verify_pool_mgr.request('GET', url).headers
def test_body(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure the responses are all identical enough'''
url = httpbin_both.url + '/bytes/1024'
with vcr.use_cassette(str(tmpdir.join('body.yaml'))):
content = verify_pool_mgr.request('GET', url).data
with vcr.use_cassette(str(tmpdir.join('body.yaml'))):
assert content == verify_pool_mgr.request('GET', url).data
def test_auth(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure that we can handle basic auth'''
auth = ('user', 'passwd')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(*auth))
url = httpbin_both.url + '/basic-auth/user/passwd'
with vcr.use_cassette(str(tmpdir.join('auth.yaml'))):
one = verify_pool_mgr.request('GET', url, headers=headers)
with vcr.use_cassette(str(tmpdir.join('auth.yaml'))):
two = verify_pool_mgr.request('GET', url, headers=headers)
assert one.data == two.data
assert one.status == two.status
def test_auth_failed(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure that we can save failed auth statuses'''
auth = ('user', 'wrongwrongwrong')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(*auth))
url = httpbin_both.url + '/basic-auth/user/passwd'
with vcr.use_cassette(str(tmpdir.join('auth-failed.yaml'))) as cass:
# Ensure that this is empty to begin with
assert_cassette_empty(cass)
one = verify_pool_mgr.request('GET', url, headers=headers)
two = verify_pool_mgr.request('GET', url, headers=headers)
assert one.data == two.data
assert one.status == two.status == 401
def test_post(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure that we can post and cache the results'''
data = {'key1': 'value1', 'key2': 'value2'}
url = httpbin_both.url + '/post'
with vcr.use_cassette(str(tmpdir.join('verify_pool_mgr.yaml'))):
req1 = verify_pool_mgr.request('POST', url, data).data
with vcr.use_cassette(str(tmpdir.join('verify_pool_mgr.yaml'))):
req2 = verify_pool_mgr.request('POST', url, data).data
assert req1 == req2
def test_redirects(tmpdir, httpbin_both, verify_pool_mgr):
'''Ensure that we can handle redirects'''
url = httpbin_both.url + '/redirect-to?url=bytes/1024'
with vcr.use_cassette(str(tmpdir.join('verify_pool_mgr.yaml'))):
content = verify_pool_mgr.request('GET', url).data
with vcr.use_cassette(str(tmpdir.join('verify_pool_mgr.yaml'))) as cass:
assert content == verify_pool_mgr.request('GET', url).data
# Ensure that we've now cached *two* responses. One for the redirect
# and one for the final fetch
assert len(cass) == 2
assert cass.play_count == 2
def test_cross_scheme(tmpdir, httpbin, httpbin_secure, verify_pool_mgr):
'''Ensure that requests between schemes are treated separately'''
# First fetch a url under http, and then again under https and then
# ensure that we haven't served anything out of cache, and we have two
# requests / response pairs in the cassette
with vcr.use_cassette(str(tmpdir.join('cross_scheme.yaml'))) as cass:
verify_pool_mgr.request('GET', httpbin_secure.url)
verify_pool_mgr.request('GET', httpbin.url)
assert cass.play_count == 0
assert len(cass) == 2
def test_gzip(tmpdir, httpbin_both, verify_pool_mgr):
'''
Ensure that requests (actually urllib3) is able to automatically decompress
the response body
'''
url = httpbin_both.url + '/gzip'
response = verify_pool_mgr.request('GET', url)
with vcr.use_cassette(str(tmpdir.join('gzip.yaml'))):
response = verify_pool_mgr.request('GET', url)
assert_is_json(response.data)
with vcr.use_cassette(str(tmpdir.join('gzip.yaml'))):
assert_is_json(response.data)
def test_https_with_cert_validation_disabled(tmpdir, httpbin_secure, pool_mgr):
with vcr.use_cassette(str(tmpdir.join('cert_validation_disabled.yaml'))):
pool_mgr.request('GET', httpbin_secure.url)
| ByteInternet/vcrpy | tests/integration/test_urllib3.py | Python | mit | 5,351 |
from context import fasin
from fasin import parse, prep
import unittest, sys, os, shutil
here = os.path.dirname(os.path.realpath(__file__))
prog = os.path.join(here, 'add13.f90')
#prog = os.path.join(here, 'temp.f90')
class TestParser(unittest.TestCase):
def _test_prep(self):
preprocessed = prep(prog)
return preprocessed
def test_parse(self):
preprocessed = self._test_prep()
tree = parse(preprocessed)
#print(sys.stdout)
if __name__ == '__main__':
    unittest.main()
| grnydawn/fasin | tests/test_fortparser.py | Python | gpl-3.0 | 529 |
import sys
import PIL.Image
from api import trans_image
def convert():
if len(sys.argv) != 4:
print >> sys.stderr, 'Usage:'
print >> sys.stderr, ' erix MODE SRC DEST'
return sys.exit(1)
mode = sys.argv[1]
src = sys.argv[2]
dest = sys.argv[3]
im = PIL.Image.open(src)
fmt = im.format
trans_image(im, mode).save(dest, format=fmt)
def view():
if len(sys.argv) != 3:
print >> sys.stderr, 'Usage:'
print >> sys.stderr, ' erixv MODE SRC'
return sys.exit(1)
mode = sys.argv[1]
src = sys.argv[2]
trans_image(PIL.Image.open(src), mode).show()
if __name__ == '__main__':
view()
| neuront/eirx | src/eirx/main.py | Python | mit | 675 |
from datetime import datetime
class StringBaseUtil(object):
def __init__(self, string):
self.string = string
def to_datetime(self):
_datetime = None
formats = [
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%d',
]
exceptions = []
for _format in formats:
try:
_datetime = datetime.strptime(self.string, _format)
return _datetime
except ValueError as e:
exceptions.append(e)
if _datetime is None:
raise exceptions[-1]
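# Illustrative usage (sketch):
#
#   StringBaseUtil('2016-01-02T03:04:05Z').to_datetime()
#   # -> datetime.datetime(2016, 1, 2, 3, 4, 5)
#
#   StringBaseUtil('2016-01-02').to_datetime()
#   # -> datetime.datetime(2016, 1, 2, 0, 0)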
| eyalev/jsonapp | jsonapp/utils/string_base_util.py | Python | mit | 616 |
# -*- coding: utf-8 -*-
import unittest
from hamlish_jinja import Hamlish, Output
import testing_base
class TestDebugOutput(testing_base.TestCase):
def setUp(self):
self.hamlish = Hamlish(
Output(indent_string=' ', newline_string='\n', debug=True))
def test_html_tags(self):
s = self._h('''
%html
%head
%title << Test
%body
%p
Test
%p << Test
%p
Test
''')
r = '''
<html>
<head>
<title>Test</title></head>
<body>
<p>
Test</p>
<p>Test</p>
<p>
Test</p></body></html>
'''
self.assertEqual(s, r)
def test_jinja_tags(self):
s = self._h('''
-macro test(name):
%p << {{ name }}
-block content:
-for i in range(20):
-if i < 10:
=test(i)
-elif i < 15:
Test {{ i|safe }}
-if i == 10:
Test
-continue
-elif i == 11:
-break
Test
-else:
Test
-trans count=i:
There is {{ count }} object.
-pluralize
There is {{ count }} objects.
-else:
Test
''')
r = '''
{% macro test(name): %}
<p>{{ name }}</p>{% endmacro %}
{% block content: %}
{% for i in range(20): %}
{% if i < 10: %}
{{ test(i) }}
{% elif i < 15: %}
Test {{ i|safe }}
{% if i == 10: %}
Test
{% continue %}
{% elif i == 11: %}
{% break %}{% endif %}
Test
{% else: %}
Test{% endif %}
{% trans count=i: %}
There is {{ count }} object.
{% pluralize %}
There is {{ count }} objects.{% endtrans %}
{% else: %}
Test{% endfor %}{% endblock %}
'''
self.assertEqual(s, r)
def test_preformatted_lines(self):
s = self._h('''
%html
%pre
|def test(name):
| if True:
| print name
%p
Test
''')
r = '''
<html>
<pre>
def test(name):
if True:
print name</pre>
<p>
Test</p></html>
'''
self.assertEqual(s, r)
def test_self_closing_tag_with_empty_lines_bellow(self):
s = self._h('''
%br
%span << test''')
r = '''
<br />
<span>test</span>
'''
self.assertEqual(s, r)
def test_nested_tags(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" << {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
def test_nested_tags2(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" -> {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
def test_nested_tags3(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}" -> =i
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">{{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
def test_nested_tags4(self):
s = self._h('''
%ul
-for i in range(10):
%li -> %a href="{{ i }}"
Test {{ i }}
%span << test
''')
r = '''
<ul>
{% for i in range(10): %}
<li><a href="{{ i }}">
Test {{ i }}</a></li>{% endfor %}
<span>test</span></ul>
'''
self.assertEqual(s, r)
if __name__ == '__main__':
unittest.main() | Pitmairen/hamlish-jinja | tests/test_debug_output.py | Python | bsd-3-clause | 3,732 |
from nslsii.detectors.zebra import (EpicsSignalWithRBV,
ZebraPulse,
ZebraFrontOutput12,
ZebraFrontOutput3,
ZebraFrontOutput4,
ZebraRearOutput,
ZebraGate,
ZebraAddresses)
from nslsii.detectors.xspress3 import (XspressTrigger,
Xspress3Detector,
Xspress3Channel)
from ophyd.areadetector.plugins import PluginBase
from ophyd import (FormattedComponent as FC)
# The names below are normally injected by earlier files in this IPython
# startup profile; they are imported explicitly here so the module is
# self-contained (the RunEngine instance ``RE`` is still expected to come from
# an earlier startup file).
from ophyd import Device, Component as Cpt, EpicsSignal, EpicsSignalRO
from bluesky.plans import count
import numpy as np
class ZebraINP(Device):
use = FC(EpicsSignal,
'{self.prefix}_ENA:B{self._bindex}')
source_addr = FC(EpicsSignalWithRBV,
'{self.prefix}_INP{self.index}')
source_str = FC(EpicsSignalRO,
'{self.prefix}_INP{self.index}:STR',
string=True)
source_status = FC(EpicsSignalRO,
'{self.prefix}_INP{self.index}:STA')
invert = FC(EpicsSignal,
'{self.prefix}_INV:B{self._bindex}')
def __init__(self, prefix, *, index, read_attrs=None, configuration_attrs=None,
**kwargs):
if read_attrs is None:
read_attrs = []
if configuration_attrs is None:
configuration_attrs = ['use', 'source_addr', 'source_str', 'invert']
self.index = index
self._bindex = index - 1
super().__init__(prefix, read_attrs=read_attrs, configuration_attrs=configuration_attrs,
**kwargs)
class ZebraLogic(Device):
inp1 = Cpt(ZebraINP, '', index=1)
inp2 = Cpt(ZebraINP, '', index=2)
inp3 = Cpt(ZebraINP, '', index=3)
inp4 = Cpt(ZebraINP, '', index=4)
out = Cpt(EpicsSignalRO, '_OUT')
def __init__(self, *args, read_attrs=None, configuration_attrs=None,
**kwargs):
if read_attrs is None:
read_attrs = ['out']
if configuration_attrs is None:
configuration_attrs = ['inp{}'.format(j) for j in range(1, 5)]
super().__init__(*args, read_attrs=read_attrs,
configuration_attrs=configuration_attrs, **kwargs)
class CHXXspress3Detector(XspressTrigger, Xspress3Detector):
roi_data = Cpt(PluginBase, 'ROIDATA:')
channel1 = Cpt(Xspress3Channel,
'C1_', channel_num=1,
read_attrs=['rois'])
class GateConfig(Device):
trig_source = Cpt(EpicsSignalWithRBV, 'PC_GATE_SEL', string=True)
start = Cpt(EpicsSignal, 'PC_GATE_START')
width = Cpt(EpicsSignal, 'PC_GATE_WID')
step = Cpt(EpicsSignal, 'PC_GATE_STEP')
ngate = Cpt(EpicsSignal, 'PC_GATE_NGATE')
status = Cpt(EpicsSignal, 'PC_GATE_OUT')
class PulseConfig(Device):
trig_source = Cpt(EpicsSignalWithRBV, 'PC_PULSE_SEL', string=True)
start = Cpt(EpicsSignal, 'PC_PULSE_START')
width = Cpt(EpicsSignal, 'PC_PULSE_WID')
step = Cpt(EpicsSignal, 'PC_PULSE_STEP')
delay = Cpt(EpicsSignal, 'PC_PULSE_DLY')
nmax = Cpt(EpicsSignal, 'PC_PULSE_MAX')
status = Cpt(EpicsSignal, 'PC_PULSE_OUT')
class Zebra(Device):
soft_input1 = Cpt(EpicsSignal, 'SOFT_IN:B0')
soft_input2 = Cpt(EpicsSignal, 'SOFT_IN:B1')
soft_input3 = Cpt(EpicsSignal, 'SOFT_IN:B2')
soft_input4 = Cpt(EpicsSignal, 'SOFT_IN:B3')
pulse1 = Cpt(ZebraPulse, 'PULSE1_', index=1)
pulse2 = Cpt(ZebraPulse, 'PULSE2_', index=2)
pulse3 = Cpt(ZebraPulse, 'PULSE3_', index=3)
pulse4 = Cpt(ZebraPulse, 'PULSE4_', index=4)
output1 = Cpt(ZebraFrontOutput12, 'OUT1_', index=1)
output2 = Cpt(ZebraFrontOutput12, 'OUT2_', index=2)
output3 = Cpt(ZebraFrontOutput3, 'OUT3_', index=3)
output4 = Cpt(ZebraFrontOutput4, 'OUT4_', index=4)
output5 = Cpt(ZebraRearOutput, 'OUT5_', index=5)
output6 = Cpt(ZebraRearOutput, 'OUT6_', index=6)
output7 = Cpt(ZebraRearOutput, 'OUT7_', index=7)
output8 = Cpt(ZebraRearOutput, 'OUT8_', index=8)
gate1 = Cpt(ZebraGate, 'GATE1_', index=1)
gate2 = Cpt(ZebraGate, 'GATE2_', index=2)
gate3 = Cpt(ZebraGate, 'GATE3_', index=3)
gate4 = Cpt(ZebraGate, 'GATE4_', index=4)
or1 = Cpt(ZebraLogic, 'OR1')
or2 = Cpt(ZebraLogic, 'OR2')
or3 = Cpt(ZebraLogic, 'OR3')
or4 = Cpt(ZebraLogic, 'OR4')
and1 = Cpt(ZebraLogic, 'AND1')
and2 = Cpt(ZebraLogic, 'AND2')
and3 = Cpt(ZebraLogic, 'AND3')
and4 = Cpt(ZebraLogic, 'AND4')
addresses = ZebraAddresses
arm = Cpt(EpicsSignal, 'PC_ARM')
arm_status = Cpt(EpicsSignal, 'PC_ARM_OUT')
arm_trig = Cpt(EpicsSignalWithRBV, 'PC_ARM_SEL', string=True)
gate_config = Cpt(GateConfig, '', read_attrs=[], configuration_attrs=['trig_source', 'start', 'width', 'step', 'ngate'])
pulse_config = Cpt(PulseConfig, '', read_attrs=[], configuration_attrs=['trig_source', 'start', 'width', 'step', 'delay', 'nmax'])
class CHXZebra(Zebra):
...
class XspressZebra(Device):
zebra = Cpt(CHXZebra, 'XF:11IDB-ES{Zebra}:', add_prefix=(),
configuration_attrs=['{}{}'.format(n, j) for n in ['pulse', 'gate', 'or', 'and']
for j in range(1,5)] + ['output{}'.format(i) for i in range(1, 9)] + ['gate_config', 'pulse_config', 'arm_trig'],
read_attrs=[])
xs = Cpt(CHXXspress3Detector, 'XSPRESS3-EXAMPLE:', add_prefix=())
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def trigger(self):
st = self.xs.trigger()
# TODO make sure Xspress is actually ready before
# arming zebra
self.zebra.arm.set(1)
return st
def construct_mca():
...
try:
mca = XspressZebra('', name='mca', configuration_attrs=['zebra', 'xs'], read_attrs=['xs'])
#rois = [ 'roi%02d'%i for i in range(1,17) ]
rois = [ 'roi%02d'%i for i in range(1,2) ]
mca.xs.channel1.rois.read_attrs = rois #['roi01']
mca.xs.channel1.rois.configuration_attrs = rois #['roi01']
#mca.xs.channel1.set_roi(1, 5500, 6500)
    mca.xs.channel1.set_roi(1, 5000, 5750)  # ROI 1: roughly 5.2 keV to 5.6 keV (IOC EDM screen values are x10)
except:
pass
#for i in range(1,17):
# mca.xs.channel1.set_roi(i,1000*i, 1000*(i+1)) # from css screen: x10!!!
#except: pass
# mca.xs.channel1.set_roi(2, 5500, 6500)
# mca.xs.channel1.rois.read_attrs.append('roi02')
# mca.xs.channel1.rois.configuration_attrs.append('roi02')
def set_rois(mca, roi_step=1000, roi_start=0, roi_range= range(1,17) ):
mca.xs.channel1.clear_all_rois()
for i in roi_range:
mca.xs.channel1.set_roi(i,
roi_start + roi_step*(i-1),
roi_start +roi_step*i ) # from css screen: x10!!!
return mca
def get_rois_val( roi_range= range(1,17) ):
vals = np.zeros( len(roi_range))
for i in roi_range:
key = 'mca_xs_channel1_rois_roi%02d_value'%i
vals[i-1]= mca.xs.channel1.rois.read()[key]['value']
return vals
def count_mca( mca, roi_step=1000, roi_start=0, roi_range= range(1,17) ):
mca = set_rois(mca, roi_step=roi_step,
roi_start=roi_start,
roi_range= roi_range )
det = [mca]
RE(count(det))
roi_vals = get_rois_val( roi_range )
rois = np.array( roi_range ) * roi_step + roi_start
return rois, roi_vals
def calibration_mca( mca ):
    # Sweep the full spectrum in 10-channel ROIs: with 16 ROIs per pass and 200 passes this covers channels 0-32000.
rois = []
vals = []
for i in range( 200 ):
roi_step = 10
roi_start = i* 16 * roi_step
roi_range = range(1,17)
mca = set_rois(mca, roi_step=roi_step,
roi_start=roi_start,
roi_range= roi_range )
det = [mca]
RE(count(det))
vals.append( list( get_rois_val( roi_range ) ))
rois.append( list( np.array( roi_range ) * roi_step + roi_start ))
return np.concatenate( np.array(rois) ), np.concatenate( np.array(vals) )
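# Illustrative usage of the helpers above (sketch; assumes the RunEngine ``RE``
# is created by an earlier startup file in this profile):
#
#   rois, vals = count_mca(mca, roi_step=1000, roi_start=0)   # one 16-ROI pass
#   energies, spectrum = calibration_mca(mca)                 # full channel sweep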
#one example:
#det = [mca]
#RE(count(det))
#mca.xs.channel1.set_roi(1, 2600,3000) # from css screen: x10!!!
| NSLS-II-CHX/ipython_ophyd | startup/98-xspress3.py | Python | bsd-2-clause | 8,345 |
#!/usr/bin/env python
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# If ../neutron/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
import eventlet
import sys
from oslo.config import cfg
from neutron.common import config
from neutron import service
from neutron.openstack.common import gettextutils
from neutron.openstack.common import log as logging
gettextutils.install('neutron', lazy=True)
LOG = logging.getLogger(__name__)
def main():
eventlet.monkey_patch()
# the configuration will be read into the cfg.CONF global data structure
config.parse(sys.argv[1:])
if not cfg.CONF.config_file:
sys.exit(_("ERROR: Unable to find configuration file via the default"
" search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and"
" the '--config-file' option!"))
try:
pool = eventlet.GreenPool()
neutron_api = service.serve_wsgi(service.NeutronApiService)
api_thread = pool.spawn(neutron_api.wait)
try:
neutron_rpc = service.serve_rpc()
except NotImplementedError:
LOG.info(_("RPC was already started in parent process by plugin."))
else:
rpc_thread = pool.spawn(neutron_rpc.wait)
# api and rpc should die together. When one dies, kill the other.
rpc_thread.link(lambda gt: api_thread.kill())
api_thread.link(lambda gt: rpc_thread.kill())
pool.waitall()
except KeyboardInterrupt:
pass
except RuntimeError as e:
sys.exit(_("ERROR: %s") % e)
if __name__ == "__main__":
main()
| Juniper/neutron | neutron/server/__init__.py | Python | apache-2.0 | 2,276 |
#!/usr/bin/python
import os
import shlex
from time import time, sleep
import sys
from sys import stdout
from subprocess import Popen, PIPE
from qtrotate import get_set_rotation
__author__ = "Joshua Hollander"
__email__ = "[email protected]"
__copyright__ = "Copyright 2013, Joshua Hollander"
FNULL = open(os.devnull, 'w')
def is_movie_file(filename, extensions=['.avi', '.AVI', '.mov', '.MOV']):
return any((filename.endswith(e) and filename.find('.bak') == -1 and filename.find(".out") == -1) for e in extensions)
def find_movies(directory):
for root, dirnames, filenames in os.walk(directory):
for filename in filter(is_movie_file, filenames):
yield os.path.join(root, filename)
def encode(input):
size = os.path.getsize(input)
try:
rotation = get_set_rotation(input)
except:
return
#if(time.time() - os.path.getctime(file) < 2592000.0):
# return
print 'File: ' + input
print 'Size: %s' % size
print 'Rotatation: %s' % rotation
print 'Time: %s' % time()
    print 'ctime: %s' % os.path.getctime(input)
    print 'Time Diff: %s' % (time() - os.path.getctime(input))
parts = os.path.splitext(input)
output = input
tmp = parts[0] + '.out' + parts[1]
backup = parts[0] + '.bak' + parts[1]
if(os.path.exists(backup)):
print 'Skipping already encoded file: ' + input
return
#do we need to rotate?
filters = ""
if rotation == 90:
filters = "transpose=1"
elif rotation == 180:
filters = "vflip,hflip"
elif rotation != 0:
print 'Skipping file with rotation: %s' % rotation
return
if filters != "":
filters = "-vf " + filters
#do the encoding
cmd = './ffmpeg -i \'{input}\' {filters} \'{output}\''.format(input=input,filters=filters,output=tmp)
print "Command: " + cmd
p = Popen(shlex.split(cmd), stdout=FNULL, stderr=PIPE)
stdout.write("Encoding " + input + " ")
stdout.flush()
code = None
while True:
stdout.write(".")
stdout.flush()
sleep(.1)
code = p.poll()
if code is not None:
break
print ""
print "Result: %s" % p.returncode
if code == 0:
#fix the rotation
if rotation != 0:
print "Adusting rotation to 0"
get_set_rotation(tmp, 0)
os.rename(input, backup) #save a backup just in case
os.rename(tmp, input)
else:
#UNDO! UNDO!
for line in p.stderr:
print line
os.remove(tmp)
if __name__ == '__main__':
directory = sys.argv[1]
for file in find_movies(directory):
encode(file)
| jho/shrinkage | shrinkage.py | Python | apache-2.0 | 2,671 |
from django.utils.translation import ugettext_lazy as _
import horizon
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.panel \
import Networking
from contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.networking.panel \
import AdminNetworking
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking_topology.panel \
import NetworkingTopology
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.l3routers.panel \
import L3Routers
from contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.l3routers.panel \
import L3AdminRouters
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.lbaas.panel \
import LoadBalancer
class NetworkingPanel(horizon.Panel):
name = "Networking"
slug = "networking"
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.urls'
class AdminNetworkingPanel(horizon.Panel):
name = "Networking"
slug = "networking"
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.networking.urls'
class NetworkingTopology(horizon.Panel):
name = _("Networking Topology")
slug = 'networking_topology'
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking_topology.urls'
class L3Routers(horizon.Panel):
name = _("Routers")
slug = 'l3routers'
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.project.l3routers.urls'
class L3AdminRouters(horizon.Panel):
name = _("Routers")
slug = 'l3routers'
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.l3routers.urls'
class LoadBalancer(horizon.Panel):
name = _("Load Balancers")
slug = 'lbaas'
urls = 'contrail_openstack_dashboard.openstack_dashboard.dashboards.admin.lbaas.urls'
try:
projects_dashboard = horizon.get_dashboard("project")
try:
topology_panel = projects_dashboard.get_panel("network_topology")
projects_dashboard.unregister(topology_panel.__class__)
except:
pass
try:
network_panel = projects_dashboard.get_panel("networks")
projects_dashboard.unregister(network_panel.__class__)
except:
pass
try:
routers_panel = projects_dashboard.get_panel("routers")
projects_dashboard.unregister(routers_panel.__class__)
except:
pass
try:
lb_panel = projects_dashboard.get_panel("loadbalancers")
projects_dashboard.unregister(lb_panel.__class__)
except:
pass
except:
pass
try:
admin_dashboard = horizon.get_dashboard("admin")
try:
admin_net_panel = admin_dashboard.get_panel("networks")
admin_dashboard.unregister(admin_net_panel.__class__)
except:
pass
try:
admin_router_panel = admin_dashboard.get_panel("routers")
admin_dashboard.unregister(admin_router_panel.__class__)
except:
pass
except:
pass
| Juniper/contrail-horizon | overrides.py | Python | apache-2.0 | 3,033 |
import sys
import subprocess
import time
import re
import os
import simplejson as json
import getopt
from run_manager import RunManager, RainOutputParser
'''
Example config
{
"profilesCreatorClass": "radlab.rain.workload.httptest.HttpTestProfileCreator",
"profilesCreatorClassParams": {
"baseHostIp": "127.0.0.1",
"numHostTargets": 5
},
"timing": {
"rampUp": 10,
"duration": 60,
"rampDown": 10
}
}
'''
class HttpTestConfig:
'''
Rain configuration object for the Http tests
'''
def __init__(self):
# profile creator to use
self.profilesCreatorClass = \
"radlab.rain.workload.httptest.HttpTestProfileCreator"
# profile creator params
self.baseHostIp = "127.0.0.1"
self.numHostTargets = 5
# timing details
self.rampUp = 10
self.duration = 60
self.rampDown = 10
def to_json( self ):
dict = {}
dict['profilesCreatorClass'] = self.profilesCreatorClass
# sub-map with profile creator parameters
creatorParams = {}
creatorParams['baseHostIp'] = self.baseHostIp
creatorParams['numHostTargets'] = self.numHostTargets
# Add profile creator params to top-level dictionary
dict['profilesCreatorClassParams'] = creatorParams
# sub map with timing info
timing = {}
timing['rampUp'] = self.rampUp
timing['duration'] = self.duration
timing['rampDown'] = self.rampDown
# Add timing info to top-level dictionary
dict['timing'] = timing
return dict
class HttpTestStepRunner:
def create_dir( self, path ):
if not os.path.exists( path ):
os.mkdir( path )
def step_run( self, start_ip, num_apps_to_load, apps_powered_on, \
results_dir="./results", run_duration_secs=60, \
config_dir="./config" ):
'''
Given a starting IP, a step size
e.g.,:
1) run servers on ip addressed 11.0.0.1 - 11.0.0.200
2) with a step size of 10 run experiments on 11.0.0.1 - 10
11.0.0.1 - 20, ... 11.0.0.1 - 200
'''
# Some pre-reqs:
# 1) create the config_dir if it does not exist
# 2) create the results_dir if it does not exist
self.create_dir( config_dir )
self.create_dir( results_dir )
num_tests = apps_powered_on/num_apps_to_load
for i in range(num_tests):
# with one Rain launch we can load an entire block of ip's
# using the track feature
#ip_address_parts = start_ip.split( "." )
#print len(ip_address_parts)
# throw exception if we don't find a numeric ip v4 address
#if len(ip_address_parts) != 4:
# raise Exception( "Expected a numeric IPv4 address"\
# + " (format N.N.N.N)" )
#lastOctet = int( ip_address_parts[3] )
#base_ip = "{0}.{1}.{2}.{3}".format( ip_address_parts[0],\
# ip_address_parts[1],\
# ip_address_parts[2],\
# str(lastOctet+(num_apps_to_load*i)))
# Create config objects to write out as files
base_ip = start_ip
config = HttpTestConfig()
config.baseHostIp = base_ip
config.numHostTargets = (i+1)*num_apps_to_load
config.duration = run_duration_secs
json_data = \
                json.dumps(config, sort_keys=True,\
default=HttpTestConfig.to_json)
# Write this data out to a file, then invoke the run mananger
# passing in the path to this file
print( "[HttpTestStepRunner] json config: {0}".format(json_data) )
run_classpath=".:rain.jar:workloads/httptest.jar"
run_config_filename = config_dir + "/" + \
"run_config_" + base_ip + "_" + \
str(config.numHostTargets) + "_nodes.json"
run_output_filename = results_dir + "/" + \
"run_log_" + base_ip + "_" + \
str(config.numHostTargets) + "_nodes.txt"
run_results_filename = results_dir + "/" + \
"run_result_" + base_ip + "_" + \
str(config.numHostTargets) + "_nodes.txt"
# write the json data out to the config file
# invoke the run manager passing the location of the config file
# collect the results and write them out to the results_dir
print "[HttpTestStepRunner] Writing config file: {0}"\
.format( run_config_filename )
config_file = open( run_config_filename, 'w' )
config_file.write( json_data )
config_file.flush()
config_file.close()
run_output = RunManager.run_rain( run_config_filename,\
run_classpath )
#print run_output
track_results = RainOutputParser.parse_output( run_output )
# Write out the run output
print "[HttpTestStepRunner] Writing output: {0}"\
.format( run_output_filename )
run_output_file = open( run_output_filename, 'w' )
run_output_file.write( run_output )
run_output_file.flush()
run_output_file.close()
# Write out the run results
print "[HttpTestStepRunner] Writing results: {0}"\
.format( run_results_filename )
run_results_file = open( run_results_filename, 'w' )
RainOutputParser.print_results( track_results, run_results_file )
run_results_file.flush()
run_results_file.close()
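# Illustrative invocation of the stepped run above (sketch; the IP and counts
# are placeholders):
#
#   runner = HttpTestStepRunner()
#   runner.step_run("11.0.0.1", 10, 200, run_duration_secs=60)
#
# launches 20 Rain runs targeting 10, 20, ..., 200 application instances.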
def usage():
print( "Usage: {0} [--startip <ipv4 address>] [--numapps <#apps to load>]"\
" [--maxapps <#apps powered on>] [--resultsdir <path>]"\
" [--duration <seconds to run>] [--configdir <path>]"\
.format(sys.argv[0]) )
print( "defaults: {0} --startip 127.0.0.1 --numapps 10"\
" --maxapps 100 --resultsdir ./results --duration 60"\
" --configdir ./config".format(sys.argv[0]) )
def main(argv):
start_ip = "127.0.0.1"
num_apps_to_load = 10
apps_powered_on = 100
results_dir = "./results"
run_duration = 60
config_dir = "./config"
# parse aguments and replace the defaults
try:
opts, args = getopt.getopt( argv, "h", ["startip=", "numapps=", \
"maxapps=", "resultsdir=", \
"duration=", "configdir=" \
"help"] )
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ( "-h", "--help" ):
usage()
sys.exit()
elif opt == "--startip":
start_ip = arg
elif opt == "--numapps":
num_apps_to_load = int(arg)
elif opt == "--maxapps":
apps_powered_on = int(arg)
elif opt == "--resultsdir":
results_dir = arg
elif opt == "--duration":
run_duration = int(arg)
elif opt == "--configdir":
config_dir = arg
test_runner = HttpTestStepRunner()
test_runner.step_run( start_ip, num_apps_to_load, \
apps_powered_on, results_dir, run_duration, \
config_dir )
if __name__=='__main__':
main( sys.argv[1:] )
| umbrant/rain-workload-toolkit | utils/testplan.py | Python | bsd-3-clause | 7,749 |
import sqlite3
con = sqlite3.connect("blog.db")
c = con.cursor()
c.execute('''CREATE TABLE users
(id text, fname text, lname text, email text, password text, online integer)''')
con.commit()
c.execute('''CREATE TABLE posts
(uid text, pid text, title text, content text, date text)''')
con.commit()
c.execute('''CREATE TABLE comments
(uid text, postid text, comment text, date text)''')
con.commit()
con.close()
| andela-bojengwa/My-Flask-Blog | models/toSQL.py | Python | apache-2.0 | 451 |
from __future__ import print_function
from __future__ import division
ALIGN_LEFT = '<'
ALIGN_CENTER = '_'
ALIGN_RIGHT = '>'
def pprint(data, header=None, dictorder=None, align=None, output_file=None):
if ((dict is type(data[0])) and (dictorder is None)):
dictorder = data[0].keys()
if ((dict is type(data[0])) and (header is None)):
header = data[0].keys()
(sdata, align) = makeStrings(data, dictorder, align)
(widths, percents) = calcSize(sdata, header)
output = ''
if header:
for i in range(len(header)):
output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1)))
if ((widths[i] - len(header[i])) % 2):
output += ' '
if percents[i]:
output += (' ' * (percents[i] - header[i].count('%')))
output += '|'
output += '\n'
for i in range(len(widths)):
output += ('+-' + ('-' * ((widths[i] + 1) + percents[i])))
output += '+'
output += '\n'
for j in range(len(sdata)):
d = sdata[j]
a = align[j]
for i in range(len(d)):
if (a[i] == ALIGN_RIGHT):
output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ')
elif (a[i] == ALIGN_CENTER):
output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1)))
if ((widths[i] - len(d[i])) % 2):
output += ' '
else:
output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1)))
if percents[i]:
output += (' ' * (percents[i] - d[i].count('%')))
output += '|'
output += '\n'
if output_file:
with open(output_file, 'wb') as output_handle:
output_handle.write(output)
else:
print(output, end='')
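# Illustrative usage (sketch):
#
#   pprint([{'name': 'alpha', 'hits': 3}, {'name': 'beta', 'hits': 12}],
#          header=['name', 'hits'], dictorder=['name', 'hits'])
#
# renders an ASCII table with one row per dictionary; numeric cells are
# right-aligned automatically.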
def makeStrings(data, dictOrder, align):
r = []
a = ([] if (align is None) else None)
for i in data:
c = []
ac = []
if dictOrder:
for k in dictOrder:
c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT])
else:
for k in i:
c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT])
r += [c]
if (a is not None):
a += [ac]
return (r, (a if (a is not None) else align))
def calcSize(data, header):
widths = range(len(data[0]))
percents = range(len(data[0]))
for i in widths:
widths[i] = 0
percents[i] = 0
if header:
for i in range(len(header)):
r = len(header[i])
if (r > widths[i]):
widths[i] = r
r = header[i].count('%')
if (r > percents[i]):
percents[i] = r
for d in data:
for i in range(len(d)):
r = len(d[i])
if (r > widths[i]):
widths[i] = r
r = d[i].count('%')
if (r > percents[i]):
percents[i] = r
return (widths, percents) | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/windows/sentinel/table_print.py | Python | unlicense | 3,527 |
# -*- coding: utf-8 -*-
# $Id$
#
# Copyright (c) 2007-2011 Otto-von-Guericke-Universität Magdeburg
#
# This file is part of ECSpooler.
#
import os
import sys
import time
import thread
import threading
import xmlrpclib
import traceback
import logging
from types import StringTypes
from types import DictionaryType
# local imports
import config
# define a default logger for spooler classes
LOG = logging.getLogger()
from lib.Service import Service
from lib.data.BackendJob import BackendJob
from lib.data.BackendResult import BackendResult
from lib.util import auth
from lib.util import errorcodes
from lib.util.SpoolerQueue import SpoolerQueue
class Spooler(Service):
"""
The Spooler manages incomming jobs and backends. Each job will be turned
over to a backend.
"""
# resourcestring
DEFAULT_DOQUEUE_WAKEUP = 2 # 2 == 2000 ms
def __init__(self, host, port):
"""
Creates a new spooler instance at the given host and port.
@param: host: host name
@param: port: port number
        The password file path is taken from config.PASSWD_FILE.
"""
Service.__init__(self, host, port)
pwd_file = config.PASSWD_FILE
assert pwd_file and type(pwd_file) in StringTypes,\
"%s requires a correct 'pwd_file' option" % self._className
# a dictionary for backends
self._backends = {}
        # a queue for managing incoming jobs
LOG.debug('Using job store %s' % config.JOB_QUEUE_STORAGE)
self._queue = SpoolerQueue(config.JOB_QUEUE_STORAGE)
# a queue for managing backend results
LOG.debug("Using result store %s" % config.RESULT_QUEUE_STORAGE)
self._results = SpoolerQueue(config.RESULT_QUEUE_STORAGE)
        # a queue for managing jobs which could not be executed because the backend was busy
LOG.debug("Using retry store %s" % config.RETRY_QUEUE_STORAGE)
self._retries = SpoolerQueue(config.RETRY_QUEUE_STORAGE)
self._restoreRetryJobs()
        # using MD5-hashed passwords from etc/passwd for authentication
self._auth = auth.UserAuthMD5(pwd_file)
# doqueue thread (we will use only one thread)
self._doqueueThread = None
self._doqueueThreadExit = False
self._doqueueThreadLock = thread.allocate_lock()
# TODO:
#self._doqueueThreadCond = threading.Condition()
def _registerFunctions(self):
"""
Register all methods/function which can be accessed from a client.
"""
self._server.register_function(self.addBackend)
self._server.register_function(self.removeBackend)
#self._server.register_function(self.stopBackend)
self._server.register_function(self.appendJob)
self._server.register_function(self.push)
self._server.register_function(self.getResults)
self._server.register_function(self.getResult)
self._server.register_function(self.pull)
self._server.register_function(self.getStatus)
self._server.register_function(self.getPID)
self._server.register_function(self.getBackends)
self._server.register_function(self.getBackendStatus)
self._server.register_function(self.getBackendInputFields)
self._server.register_function(self.getBackendTestFields)
def _manageBeforeStart(self):
"""
Starts a thread which checks the queue for new jobs.
"""
# start a new thread which runs the doqueue-method
LOG.info('Starting scheduler thread (%s)...' % self._className)
self._doqueueThread = threading.Thread(target=self._doqueue)
self._doqueueThread.start()
return True
def _manageBeforeStop(self):
"""
Stops the scheduler thread and all backends before shutting down
the spooler itself.
"""
# stop doqueue thread
LOG.info('Stopping scheduler thread (%s)...' % self._className)
self._doqueueThreadExit = True
while self._doqueueThread and self._doqueueThread.isAlive():
LOG.debug('Scheduler thread is still alive, waiting %ds' %
self.DEFAULT_DOQUEUE_WAKEUP)
time.sleep(self.DEFAULT_DOQUEUE_WAKEUP)
LOG.info('Stopping backends...')
# We make a copy of self._backends because it will change during
# iteration: backends call removeBackend in their shutdown method,
# which will change self._backends.
backends = self._backends.copy()
# TODO: Don't if SMF is enabled, since the backend would be respawned
# automatically by SMF (self healing) - env var SMF_METHOD
# is an indicator, that the service is running under SMF
# just unregister it and perhaps send a message to the backend to
# give a hint to free resources/unbind
for grp in backends.itervalues():
for backend in grp:
self._callBackend(backend['url'], 'shutdown', backend['id'])
# wait a moment so that backends have enough time to unregister
time.sleep(1.0)
# def stopBackend(self, authdata, uid):
# """
# Stopps the backend with the given name by invoking the
# shutdown method for this backend. The backend itself then
# invokes the removeBackend method in the spooler.
#
# @param: authdata: username and password for authentication
# @param: uid: a backend's unique ID
# @return: (code, msg) with
# code == 1, stopping backend succeeded
# code != 1, stopping backend failed; msg contains further information
# """
# if not self._auth.test(authdata, auth.STOP_BACKEND):
# return (-110, self.ERROR_AUTH_FAILED)
#
# if uid in self._backends:
# grp = self._backends[uid]
#
# for backend in grp:
# LOG.info("Stopping backend '%s' at '%s'" % (uid, backend['url']))
# # TODO: don't if SMF is enabled - self healing - see above
# self._callBackend(backend["url"], "shutdown")
#
# return (1, '')
# else:
# return (-120, "Backend '%s' is not registered" % uid)
# xmlrpc
def addBackend(self, authdata, backendId, name, version, url):
"""
Adds a backend to the spooler's list of available backends.
Each backend calls the spooler on being started, and tells
the server its ID, name and URL. The spooler returns
a specific ID, which is a random number created at server
startup. This ID is sent to the backend on each request and
thus authorizes the request. Only the spooler to which the
backend is attached can perform requests to this backend.
@param: authdata: username and password for authentication
@param: backendId: a backend's unique ID
@param: name: a backend's name
@param: version: a backend's version
@param: url: a backend's URL
@return: this server's ID (for further communication)
"""
if not self._auth.test(authdata, auth.ADD_BACKEND):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
if backendId not in self._backends:
# 1st backend of this type
self._backends[backendId] = list()
self._backends[backendId].append({'id': backendId,
'name': name,
'version': version,
'url': url,
'isBusy': False})
LOG.info("Backend '%s %s (%s) (%s)' registered" %
(name, version, backendId, url))
return self._srvId
def removeBackend(self, authdata, backendId, url):
"""
Removes a backend from the list of available backends in this spooler.
@param: authdata: username and password for authentication
        @param: backendId: a backend's ID
        @param: url: the URL of the backend instance to unregister
        @return: True if the backend instance was unregistered; raises an
                 exception (errorcodes.NO_SUCH_BACKEND) otherwise
"""
if not self._auth.test(authdata, auth.REMOVE_BACKEND):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
if backendId in self._backends:
grp = self._backends[backendId]
for backend in grp:
#backend = self._backends[backendId]
if url == backend['url']:
LOG.info("Unregistering backend '%s %s (%s) (%s)'..." %
(backend['name'],
backend['version'],
backend['id'],
backend['url']))
#self._backends[backendId].remove(backend)
grp.remove(backend)
# end if
# end for
if len(grp) == 0:
self._backends.pop(backendId)
LOG.info("Backend '%s (%s)' unregistered" % (backendId, url))
return True
else:
raise Exception(errorcodes.NO_SUCH_BACKEND)
def push(self, authdata, jobdata):
"""
@see: appendJob
"""
return self.appendJob(authdata, jobdata)
def appendJob(self, authdata, jobdata):
"""
        Adds a new check job to the queue
        @param: authdata: username and password for authentication
        @param: jobdata: relevant job data (see also class BackendJob)
        @return: the id of the enqueued job; raises an exception if the
                 backend is unknown or the job data is invalid
"""
if not self._auth.test(authdata, auth.PUT):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
try:
            # create a new BackendJob instance
            job = BackendJob(data=jobdata)
            # get the backend from the job
            backendId = job['backend']
            # if no backend with the backendId is currently registered to this
            # spooler an appropriate message will be returned
            if backendId not in self._backends:
                LOG.warn("No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
# append the job
LOG.info("Enqueueing job '%s'" % job.getId())
self._queue.enqueue(job)
return job.getId()
except Exception, e:
msg = 'Invalid or insufficient data: %s: %s' % (sys.exc_info()[0], e)
LOG.error(msg)
raise Exception(msg)
def getResults(self, authdata):
"""
Returns a dictionary with the results of all performed jobs. Once the
results are polled, they are no longer stored.
@param: authdata: username and password for authentication
@return: a dictionary
"""
if not self._auth.test(authdata, auth.POP):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
result = {}
LOG.debug("Dequeuing results for all jobs (%d)" % self._results.getSize())
while (not self._results.isEmpty()):
item = self._results.dequeue()
LOG.info("Returning result for job '%s'" % item.getId())
result[item.getId()] = item.getData()
return result
def pull(self, authdata, jobId):
"""
@see: getResult
"""
return self.getResult(authdata, jobId)
def getResult(self, authdata, jobId):
"""
Returns a dictionary { jobID: QueueItem.getData() } with
the result of the performed check job for the given ID.
Once the result is polled, it is no longer stored.
@param: authdata: username and password for authentication
@param: jobId: a valid job ID
@return: a dictionary with 'id' as key and another dictionary with keys
'value', 'message', etc. representing the test results
"""
if not self._auth.test(authdata, auth.POP):
#return {'value':-110, 'message':self.ERROR_AUTH_FAILED}
raise Exception(errorcodes.ERROR_AUTH_FAILED)
result = {}
LOG.debug("Dequeuing result for job '%s'" % jobId)
item = self._results.dequeue(jobId)
if item:
LOG.info("Returning result for job '%s'" % jobId)
result[item.getId()] = item.getData()
else:
LOG.info("No result for job '%s'" % jobId)
return result
def getStatus(self, authdata):
"""
Returns a dict with some status information:
"pid": the process ID
"backends": a list of the attached backends
"queue": the number of items in the queue
"results": the number of cached result data
@param: authdata: username and password for authentication
"""
LOG.debug('Returning spooler status information')
if not self._auth.test(authdata, auth.GET_STATUS):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
return {
#"pid": os.getpid(),
"backends": ["%s:%s" % (key, len(self._backends[key])) for key in self._backends],
"queue": self._queue.getSize(),
"results": self._results.getSize(),
}
def getPID(self, authdata):
"""
Returns the process ID
@param authdata: username and password for authentication
@return: current process ID
"""
LOG.debug('Returning spooler PID')
if not self._auth.test(authdata, auth.SHUTDOWN):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
return os.getpid()
def getBackends(self, authdata):
"""
Returns a dict with all currently available backends.
@param: authdata: username and password for authentication
@return: dict with backend names as keys
"""
LOG.debug('Returning all available backends')
if not self._auth.test(authdata, auth.GET_STATUS):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
result = {}
for grpId in self._backends:
# get backend group
grp = self._backends[grpId]
if len(grp) > 0:
# if group has at least one backend instance, add it
# to result
result[grpId] = grp[0]
return result
def getBackendStatus(self, authdata, backendId):
"""
Returns a dict with status information of a single backend.
@param: authdata: username and password for authentication
@param: backendId: a backend's unique ID
"""
LOG.debug("Trying to return status information for backend '%s'" % backendId)
if not self._auth.test(authdata, auth.GET_BACKEND_INFO):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
if not self._hasBackend(backendId):
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
grp = self._backends.get(backendId)
if len(grp) > 0:
# if group has at least one backend instance, we will use the first entry
backend = grp[0]
return self._callBackend(backend['url'], 'getStatus', backend['id'])
else:
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
def getBackendInputFields(self, authdata, backendId):
"""
Returns information about additional fields required by this backend.
@param: authdata: username and password for authentication
@param: backendId: a backend's unique ID
@see: Backend.getInputFields
"""
LOG.debug("Trying to return input fields for backend '%s'" % backendId)
if not self._auth.test(authdata, auth.GET_BACKEND_INFO):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
if not self._hasBackend(backendId):
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
grp = self._backends.get(backendId)
if len(grp) > 0:
# if group has at least one backend instance, add it
# to result
backend = grp[0]
return self._callBackend(backend['url'], 'getInputFields')
else:
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
def getBackendTestFields(self, authdata, backendId):
"""
        Returns information about the test scenarios provided by this backend.
        @param: authdata: username and password for authentication
        @param: backendId: a backend's unique ID
        @see: Backend.getTestFields
"""
LOG.debug("Trying to return test specs for backend '%s'" % backendId)
if not self._auth.test(authdata, auth.GET_BACKEND_INFO):
#return (-110, self.ERROR_AUTH_FAILED)
raise Exception(errorcodes.ERROR_AUTH_FAILED)
if not self._hasBackend(backendId):
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
grp = self._backends.get(backendId)
if len(grp) > 0:
# if group has at least one backend instance, add it
# to result
backend = grp[0]
return self._callBackend(backend['url'], 'getTestFields')
else:
#return (-112, "No such backend: %s" % backendId)
raise Exception(errorcodes.NO_SUCH_BACKEND)
def _hasBackend(self, backendId):
"""
@return: True if a backend with the given backendId is registered,
otherwise False
"""
        if backendId in self._backends:
return True
else:
msg = "No such backend: %s" % backendId
LOG.warn(msg)
return False
def _doqueue(self):
"""
Performs the dispatching of a job to a backend.
This method is called in a separate thread, which is opened
and managed by the threading.Thread class. _doqueue() runs in a
loop until _doqueue_thread_stop is True.
"""
while not self._doqueueThreadExit:
try:
#self._doqueueThreadLock.acquire()
# is the queue empty?
if not self._queue.isEmpty():
# get next job from the queue and the selected backend
job = self._queue.dequeue()
backendId = job['backend']
if backendId in self._backends:
# backend groups
grp = self._backends[backendId]
backend = None
for b in grp:
# backend busy?
if not b.get('isBusy', False):
backend = b
break
# end for
                        if backend is None:
                            # backend is busy, try again later
LOG.info('Backend %s is busy (%s)' %
(backendId, job.getId(),))
# try later
#self._queue.enqueue(job)
# put this job in retry-queue
LOG.info("Adding job to retry queue: %s" % job.getId())
self._retries.enqueue(job)
# process job
else:
                            # don't block - continue to service other backends
threading.Thread(target=self._processJob, args=(backend, job)).start()
# end if
else:
LOG.warn("Job %s can not be executed, no such "
"backend: %s" % (job.getId(), backendId))
# enqueue the job so maybe later, if the backend
# is available, we can process it
self._queue.enqueue(job)
# end if
else:
#LOG.debug('_doqueue: self._queue is empty');
pass
except Exception, e:
msg = '%s: %s' % (sys.exc_info()[0], e)
LOG.error(msg)
# wait a defined time before resuming
time.sleep(self.DEFAULT_DOQUEUE_WAKEUP)
# end while loop
def _processJob(self, backend, job):
"""
        Processes the job by dispatching it to the backend.
        @param: backend: a dict with backend attributes (such as id, name, url)
@param: job: a job instance
"""
backend['isBusy'] = True
try:
LOG.info("Dispatching job '%s' to backend '%s'" %
(job.getId(), job['backend']))
# invoke remote method call
rd = self._callBackend(backend['url'], 'execute', job.getData())
# result from backend must be of type dict
if type(rd) == DictionaryType:
# this should be normal case
result = BackendResult(data=rd, id=job.getId())
elif type(rd) in StringTypes:
# probably some kind of error in the backend
result = BackendResult(-152, rd, id=job.getId())
else:
# unexpected or unhandled result
msg = 'Unexpected result type: %s' % repr(type(rd))
LOG.error(msg)
result = BackendResult(-151, msg, id=job.getId())
except Exception, e:
msg = '%s: %s' % (sys.exc_info()[0], e)
LOG.error(msg)
result = BackendResult(-153, msg, id=job.getId())
self._results.enqueue(result)
LOG.info("Result of job '%s' added to result queue" % (result.getId(),))
backend['isBusy'] = False
# move jobs from retry queue back to default queue
self._restoreRetryJobs()
#LOG.debug('jobId: %s' % job.getId())
#LOG.debug('data: %s' % result.getData())
return result
def _restoreRetryJobs(self):
"""
Move jobs from retry queue back to default queue
"""
size = self._retries.getSize()
if size > 0:
LOG.debug("Moving %d job(s) back from 'retry queue' to 'default queue'."
% size)
while not self._retries.isEmpty():
item = self._retries.dequeue()
self._queue.enqueue(item)
def _callBackend(self, url, method, *kw, **args):
"""
Executes an xmlrpc call.
@param: url: backend's URL
@param: method: name of method that will be invoked
@return: a tuple with code and result or an error message
"""
#LOG.debug('xxx: %s' % repr(kw))
#LOG.debug("xmlrpclib.Server('%s')" % (url))
s = xmlrpclib.Server(url)
try:
result = getattr(s, method)({"srv_id": self._srvId}, *kw, **args)
#LOG.debug('_callBackend: %s' % repr(result))
return result
#except (socket.error, xmlrpclib.Fault, xmlrpclib.ProtocolError), exc:
except Exception, e:
LOG.error(traceback.format_exc())
msg = 'Server error: %s: %s (%s)' % (sys.exc_info()[0], e, method)
raise Exception(msg)
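# A minimal usage sketch for this module. The host/port values and the
# credential layout are illustrative assumptions -- authdata must match
# whatever lib.util.auth.UserAuthMD5 expects for the configured password
# file, and the XML-RPC server loop itself is provided by lib.Service.
#
#     spooler = Spooler("localhost", 5050)   # served via the Service base class
#
#     # from an XML-RPC client:
#     import xmlrpclib
#     proxy = xmlrpclib.Server("http://localhost:5050")
#     auth = {"username": "demo", "password": "demo"}   # assumed credential layout
#     jobdata = {...}                                    # job data as accepted by BackendJob
#     job_id = proxy.appendJob(auth, jobdata)
#     result = proxy.getResult(auth, job_id)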
| collective/ECSpooler | lib/Spooler.py | Python | gpl-2.0 | 25,028 |
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import os
import re
from collections import defaultdict
from StringIO import StringIO
from lxml import etree
from lxml import isoschematron
import xlrd
class XmlValidator(object):
NS_XML_SCHEMA_INSTANCE = "http://www.w3.org/2001/XMLSchema-instance"
NS_XML_SCHEMA = "http://www.w3.org/2001/XMLSchema"
def __init__(self, schema_dir=None, use_schemaloc=False):
self.__imports = self._build_imports(schema_dir)
self.__use_schemaloc = use_schemaloc
def _get_target_ns(self, fp):
'''Returns the target namespace for a schema file
Keyword Arguments
fp - the path to the schema file
'''
tree = etree.parse(fp)
root = tree.getroot()
return root.attrib['targetNamespace'] # throw an error if it doesn't exist...we can't validate
def _get_include_base_schema(self, list_schemas):
'''Returns the root schema which defines a namespace.
Certain schemas, such as OASIS CIQ use xs:include statements in their schemas, where two schemas
        define the same namespace (e.g., XAL.xsd and XAL-types.xsd). This makes validation difficult, since we
        must refer to a single schema for a given namespace.
To fix this, we attempt to find the root schema which includes the others. We do this by seeing
if a schema has an xs:include element, and if it does we assume that it is the parent. This is
totally wrong and needs to be fixed. Ideally this would build a tree of includes and return the
root node.
Keyword Arguments:
list_schemas - a list of schema file paths that all belong to the same namespace
'''
parent_schema = None
tag_include = "{%s}include" % (self.NS_XML_SCHEMA)
for fn in list_schemas:
tree = etree.parse(fn)
root = tree.getroot()
includes = root.findall(tag_include)
if len(includes) > 0: # this is a hack that assumes if the schema includes others, it is the base schema for the namespace
return fn
return parent_schema
def _build_imports(self, schema_dir):
'''Given a directory of schemas, this builds a dictionary of schemas that need to be imported
under a wrapper schema in order to enable validation. This returns a dictionary of the form
{namespace : path to schema}.
Keyword Arguments
schema_dir - a directory of schema files
'''
if not schema_dir:
return None
imports = defaultdict(list)
for top, dirs, files in os.walk(schema_dir):
for f in files:
if f.endswith('.xsd'):
fp = os.path.join(top, f)
target_ns = self._get_target_ns(fp)
imports[target_ns].append(fp)
for k,v in imports.iteritems():
if len(v) > 1:
base_schema = self._get_include_base_schema(v)
imports[k] = base_schema
else:
imports[k] = v[0]
return imports
def _build_wrapper_schema(self, import_dict):
'''Creates a wrapper schema that imports all namespaces defined by the input dictionary. This enables
validation of instance documents that refer to multiple namespaces and schemas
Keyword Arguments
import_dict - a dictionary of the form {namespace : path to schema} that will be used to build the list of xs:import statements
'''
schema_txt = '''<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="http://stix.mitre.org/tools/validator" elementFormDefault="qualified" attributeFormDefault="qualified"/>'''
root = etree.fromstring(schema_txt)
tag_import = "{%s}import" % (self.NS_XML_SCHEMA)
for ns, list_schemaloc in import_dict.iteritems():
schemaloc = list_schemaloc
schemaloc = schemaloc.replace("\\", "/")
attrib = {'namespace' : ns, 'schemaLocation' : schemaloc}
el_import = etree.Element(tag_import, attrib=attrib)
root.append(el_import)
return root
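    # For illustration (the namespace and path below are made up): given
    # import_dict = {"http://example.org/ns": "/schemas/example.xsd"},
    # _build_wrapper_schema produces a wrapper equivalent to:
    #
    #   <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
    #              targetNamespace="http://stix.mitre.org/tools/validator"
    #              elementFormDefault="qualified" attributeFormDefault="qualified">
    #     <xs:import namespace="http://example.org/ns"
    #                schemaLocation="/schemas/example.xsd"/>
    #   </xs:schema>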
def _extract_schema_locations(self, root):
schemaloc_dict = {}
tag_schemaloc = "{%s}schemaLocation" % (self.NS_XML_SCHEMA_INSTANCE)
schemaloc = root.attrib[tag_schemaloc].split()
schemaloc_pairs = zip(schemaloc[::2], schemaloc[1::2])
for ns, loc in schemaloc_pairs:
schemaloc_dict[ns] = loc
return schemaloc_dict
def _build_result_dict(self, result, errors=None):
d = {}
d['result'] = result
if errors:
if not hasattr(errors, "__iter__"):
errors = [errors]
d['errors'] = errors
return d
def validate(self, instance_doc):
        '''Validates an instance document.
        Returns a dictionary whose 'result' key holds the boolean validation
        outcome and which, on failure, also contains an 'errors' key listing
        the validation errors.
Keyword Arguments
instance_doc - a filename, file-like object, etree._Element, or etree._ElementTree to be validated
'''
if not(self.__use_schemaloc or self.__imports):
            return self._build_result_dict(False, "No schemas to validate against! Try instantiating XmlValidator with use_schemaloc=True or setting the schema_dir")
if isinstance(instance_doc, etree._Element):
instance_root = instance_doc
elif isinstance(instance_doc, etree._ElementTree):
instance_root = instance_doc.getroot()
else:
try:
et = etree.parse(instance_doc)
instance_root = et.getroot()
except etree.XMLSyntaxError as e:
return self._build_result_dict(False, str(e))
if self.__use_schemaloc:
try:
required_imports = self._extract_schema_locations(instance_root)
except KeyError as e:
                return self._build_result_dict(False, "No schemaLocation attribute set on instance document. Unable to validate")
else:
required_imports = {}
for prefix, ns in instance_root.nsmap.iteritems():
schemaloc = self.__imports.get(ns)
if schemaloc:
required_imports[ns] = schemaloc
        if not required_imports:
            return self._build_result_dict(False, "Unable to determine schemas to validate against")
wrapper_schema_doc = self._build_wrapper_schema(import_dict=required_imports)
xmlschema = etree.XMLSchema(wrapper_schema_doc)
isvalid = xmlschema.validate(instance_root)
if isvalid:
return self._build_result_dict(True)
else:
return self._build_result_dict(False, [str(x) for x in xmlschema.error_log])
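# A minimal usage sketch for XmlValidator (the paths are illustrative):
#
#     validator = XmlValidator(schema_dir="/path/to/xsd")   # or use_schemaloc=True
#     result = validator.validate("document.xml")
#     if not result['result']:
#         for error in result.get('errors', []):
#             print error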
class STIXValidator(XmlValidator):
'''Schema validates STIX v1.1 documents and checks best practice guidance'''
__stix_version__ = "1.1"
PREFIX_STIX_CORE = 'stix'
PREFIX_CYBOX_CORE = 'cybox'
PREFIX_STIX_INDICATOR = 'indicator'
NS_STIX_CORE = "http://stix.mitre.org/stix-1"
NS_STIX_INDICATOR = "http://stix.mitre.org/Indicator-2"
NS_CYBOX_CORE = "http://cybox.mitre.org/cybox-2"
NS_MAP = {PREFIX_CYBOX_CORE : NS_CYBOX_CORE,
PREFIX_STIX_CORE : NS_STIX_CORE,
PREFIX_STIX_INDICATOR : NS_STIX_INDICATOR}
def __init__(self, schema_dir=None, use_schemaloc=False, best_practices=False):
super(STIXValidator, self).__init__(schema_dir, use_schemaloc)
self.best_practices = best_practices
def _check_id_presence_and_format(self, instance_doc):
'''Checks that the core STIX/CybOX constructs in the STIX instance document
have ids and that each id is formatted as [ns_prefix]:[object-type]-[GUID].
Returns a dictionary of lists. Each dictionary has the following keys:
no_id - a list of etree Element objects for all nodes without ids
format - a list of etree Element objects with ids not formatted as [ns_prefix]:[object-type]-[GUID]
Keyword Arguments
instance_doc - an etree Element object for a STIX instance document
'''
return_dict = {'no_id' : [],
'format' : []}
elements_to_check = ['stix:Campaign',
'stix:Course_Of_Action',
'stix:Exploit_Target',
'stix:Incident',
'stix:Indicator',
'stix:STIX_Package',
'stix:Threat_Actor',
'stix:TTP',
'cybox:Observable',
'cybox:Object',
'cybox:Event',
'cybox:Action']
for tag in elements_to_check:
xpath = ".//%s" % (tag)
elements = instance_doc.xpath(xpath, namespaces=self.NS_MAP)
for e in elements:
try:
if not re.match(r'\w+:\w+-', e.attrib['id']): # not the best regex
return_dict['format'].append({'tag':e.tag, 'id':e.attrib['id'], 'line_number':e.sourceline})
except KeyError as ex:
return_dict['no_id'].append({'tag':e.tag, 'line_number':e.sourceline})
return return_dict
def _check_duplicate_ids(self, instance_doc):
'''Looks for duplicate ids in a STIX instance document.
Returns a dictionary of lists. Each dictionary uses the offending
id as a key, which points to a list of etree Element nodes which
use that id.
Keyword Arguments
instance_doc - an etree.Element object for a STIX instance document
'''
dict_id_nodes = defaultdict(list)
dup_dict = {}
xpath_all_nodes_with_ids = "//*[@id]"
all_nodes_with_ids = instance_doc.xpath(xpath_all_nodes_with_ids)
for node in all_nodes_with_ids:
dict_id_nodes[node.attrib['id']].append(node)
for id,node_list in dict_id_nodes.iteritems():
if len(node_list) > 1:
dup_dict[id] = [{'tag':node.tag, 'line_number':node.sourceline} for node in node_list]
return dup_dict
def _check_idref_resolution(self, instance_doc):
'''Checks that all idref attributes in the input document resolve to a local element.
        Returns a list of dictionaries describing nodes with unresolvable idrefs.
Keyword Arguments
instance_doc - an etree.Element object for a STIX instance document
'''
list_unresolved_ids = []
xpath_all_idrefs = "//*[@idref]"
xpath_all_ids = "//@id"
all_idrefs = instance_doc.xpath(xpath_all_idrefs)
all_ids = instance_doc.xpath(xpath_all_ids)
for node in all_idrefs:
if node.attrib['idref'] not in all_ids:
d = {'tag': node.tag,
'idref': node.attrib['idref'],
'line_number' : node.sourceline}
list_unresolved_ids.append(d)
return list_unresolved_ids
def _check_idref_with_content(self, instance_doc):
'''Looks for elements that have an idref attribute set, but also have content.
        Returns a list of dictionaries describing the offending elements.
Keyword Arguments:
instance_doc - an etree.Element object for a STIX instance document
'''
list_nodes = []
xpath = "//*[@idref]"
nodes = instance_doc.xpath(xpath)
for node in nodes:
if node.text or len(node) > 0:
d = {'tag' : node.tag,
'idref' : node.attrib['idref'],
'line_number' : node.sourceline}
                list_nodes.append(d)
return list_nodes
def _check_indicator_practices(self, instance_doc):
'''Looks for STIX Indicators that are missing a Title, Description, Type, Valid_Time_Position,
Indicated_TTP, and/or Confidence
Returns a list of dictionaries. Each dictionary has the following keys:
id - the id of the indicator
        line_number - the source line number of the indicator
missing - a list of constructs missing from the indicator
Keyword Arguments
        instance_doc - etree Element for a STIX instance document
'''
list_indicators = []
        xpath = "//%s:Indicator | //%s:Indicator" % (self.PREFIX_STIX_CORE, self.PREFIX_STIX_INDICATOR)
nodes = instance_doc.xpath(xpath, namespaces=self.NS_MAP)
for node in nodes:
dict_indicator = defaultdict(list)
if not node.attrib.get('idref'): # if this is not an idref node, look at its content
if node.find('{%s}Title' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('Title')
if node.find('{%s}Description' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('Description')
if node.find('{%s}Type' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('Type')
if node.find('{%s}Valid_Time_Position' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('Valid_Time_Position')
if node.find('{%s}Indicated_TTP' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('TTP')
if node.find('{%s}Confidence' % (self.NS_STIX_INDICATOR)) is None:
dict_indicator['missing'].append('Confidence')
if dict_indicator:
dict_indicator['id'] = node.attrib.get('id')
dict_indicator['line_number'] = node.sourceline
list_indicators.append(dict_indicator)
return list_indicators
def _check_root_element(self, instance_doc):
d = {}
if instance_doc.tag != "{%s}STIX_Package" % (self.NS_STIX_CORE):
d['tag'] = instance_doc.tag
d['line_number'] = instance_doc.sourceline
return d
def check_best_practices(self, instance_doc):
'''Checks that a STIX instance document is following best practice guidance.
Looks for the following:
+ idrefs that do not resolve locally
+ elements with duplicate ids
+ elements without ids
+ elements with ids not formatted as [ns_prefix]:[object-type]-[GUID]
+ indicators missing a Title, Description, Type, Valid_Time_Position, Indicated_TTP, and/or Confidence
Returns a dictionary of lists and other dictionaries. This is maybe not ideal but workable.
Keyword Arguments
instance_doc - a filename, file-like object, etree._Element or etree.ElementTree for a STIX instance document
'''
if isinstance(instance_doc, etree._Element):
root = instance_doc
elif isinstance(instance_doc, etree._ElementTree):
root = instance_doc.getroot()
elif isinstance(instance_doc, basestring):
tree = etree.parse(instance_doc)
root = tree.getroot()
else:
instance_doc.seek(0)
tree = etree.parse(instance_doc)
root = tree.getroot()
root_element = self._check_root_element(root)
list_unresolved_idrefs = self._check_idref_resolution(root)
dict_duplicate_ids = self._check_duplicate_ids(root)
dict_presence_and_format = self._check_id_presence_and_format(root)
list_idref_with_content = self._check_idref_with_content(root)
list_indicators = self._check_indicator_practices(root)
d = {}
if root_element:
d['root_element'] = root_element
if list_unresolved_idrefs:
d['unresolved_idrefs'] = list_unresolved_idrefs
if dict_duplicate_ids:
d['duplicate_ids'] = dict_duplicate_ids
if dict_presence_and_format:
if dict_presence_and_format.get('no_id'):
d['missing_ids'] = dict_presence_and_format['no_id']
if dict_presence_and_format.get('format'):
d['id_format'] = dict_presence_and_format['format']
if list_idref_with_content:
d['idref_with_content'] = list_idref_with_content
if list_indicators:
d['indicator_suggestions'] = list_indicators
return d
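    # The dictionary returned by check_best_practices() only contains keys
    # for issues actually found; the values below are illustrative only:
    #
    #   {'missing_ids': [{'tag': '...Indicator', 'line_number': 42}],
    #    'duplicate_ids': {'example:Package-1': [{'tag': '...', 'line_number': 7},
    #                                            {'tag': '...', 'line_number': 90}]},
    #    'indicator_suggestions': [{'id': 'example:indicator-1',
    #                               'line_number': 55,
    #                               'missing': ['Title', 'Confidence']}]}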
def validate(self, instance_doc):
'''Validates a STIX document and checks best practice guidance if STIXValidator
was initialized with best_practices=True.
Best practices will not be checked if the document is schema-invalid.
Keyword Arguments
instance_doc - a filename, file-like object, etree._Element or etree.ElementTree for a STIX instance document
'''
result_dict = super(STIXValidator, self).validate(instance_doc)
isvalid = result_dict['result']
if self.best_practices and isvalid:
best_practice_warnings = self.check_best_practices(instance_doc)
else:
best_practice_warnings = None
if best_practice_warnings:
result_dict['best_practice_warnings'] = best_practice_warnings
return result_dict
class SchematronValidator(object):
NS_SVRL = "http://purl.oclc.org/dsdl/svrl"
NS_SCHEMATRON = "http://purl.oclc.org/dsdl/schematron"
NS_SAXON = "http://icl.com/saxon" # libxml2 requires this namespace instead of http://saxon.sf.net/
NS_SAXON_SF_NET = "http://saxon.sf.net/"
def __init__(self, schematron=None):
self.schematron = None # isoschematron.Schematron instance
self._init_schematron(schematron)
def _init_schematron(self, schematron):
        '''Initializes self.schematron as an instance of lxml.isoschematron.Schematron.'''
if schematron is None:
self.schematron = None
return
elif not (isinstance(schematron, etree._Element) or isinstance(schematron, etree._ElementTree)):
tree = etree.parse(schematron)
else:
tree = schematron
self.schematron = isoschematron.Schematron(tree, store_report=True, store_xslt=True, store_schematron=True)
def get_xslt(self):
if not self.schematron:
return None
return self.schematron.validator_xslt
def get_schematron(self):
if not self.schematron:
return None
return self.schematron.schematron
def _build_result_dict(self, result, report=None):
'''Creates a dictionary to be returned by the validate() method.'''
d = {}
d['result'] = result
if report:
d['report'] = report
return d
def _get_schematron_errors(self, validation_report):
'''Returns a list of SVRL failed-assert and successful-report elements.'''
xpath = "//svrl:failed-assert | //svrl:successful-report"
errors = validation_report.xpath(xpath, namespaces={'svrl':self.NS_SVRL})
return errors
def _get_error_line_numbers(self, d_error, tree):
'''Returns a sorted list of line numbers for a given Schematron error.'''
locations = d_error['locations']
nsmap = d_error['nsmap']
line_numbers = []
for location in locations:
ctx_node = tree.xpath(location, namespaces=nsmap)[0]
if ctx_node.sourceline not in line_numbers:
line_numbers.append(ctx_node.sourceline)
line_numbers.sort()
return line_numbers
def _build_error_dict(self, errors, instance_tree, report_line_numbers=True):
'''Returns a dictionary representation of the SVRL validation report:
        d0 = { <Schematron error message> : d1 }
d1 = { "locations" : A list of XPaths to context nodes,
"line_numbers" : A list of line numbers where the error occurred,
"test" : The Schematron evaluation expression used,
"text" : The Schematron error message }
'''
d_errors = {}
for error in errors:
text = error.find("{%s}text" % self.NS_SVRL).text
location = error.attrib.get('location')
test = error.attrib.get('test')
if text in d_errors:
d_errors[text]['locations'].append(location)
else:
d_errors[text] = {'locations':[location], 'test':test, 'nsmap':error.nsmap, 'text':text}
if report_line_numbers:
for d_error in d_errors.itervalues():
line_numbers = self._get_error_line_numbers(d_error, instance_tree)
d_error['line_numbers'] = line_numbers
return d_errors
def _build_error_report_dict(self, validation_report, instance_tree, report_line_numbers=True):
errors = self._get_schematron_errors(validation_report)
d_errors = self._build_error_dict(errors, instance_tree, report_line_numbers)
report_dict = defaultdict(list)
for msg, d in d_errors.iteritems():
d_error = {'error' : msg}
if 'line_numbers' in d:
d_error['line_numbers'] = d['line_numbers']
report_dict['errors'].append(d_error)
return report_dict
def validate(self, instance, report_line_numbers=True):
'''Validates an XML instance document.
Arguments:
report_line_numbers : Includes error line numbers in the returned dictionary.
This may slow performance.
'''
if not self.schematron:
            raise Exception('Schematron document not set. Cannot validate. Call _init_schematron(...) and retry.')
try:
if isinstance(instance, etree._Element):
tree = etree.ElementTree(instance)
elif isinstance(instance, etree._ElementTree):
tree = instance
else:
tree = etree.parse(instance)
result = self.schematron.validate(tree)
            report = self._build_error_report_dict(self.schematron.validation_report, tree, report_line_numbers)
            if len(report['errors']) > 0:
                return self._build_result_dict(result, report)
            else:
                return self._build_result_dict(result)
except etree.ParseError as e:
return self._build_result_dict(False, [str(e)])
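# A minimal usage sketch for SchematronValidator (file names are illustrative):
#
#     validator = SchematronValidator(schematron="rules.sch")
#     result = validator.validate("document.xml")
#     if not result['result']:
#         for error in result.get('report', {}).get('errors', []):
#             print error['error'], error.get('line_numbers')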
class ProfileValidator(SchematronValidator):
NS_STIX = "http://stix.mitre.org/stix-1"
def __init__(self, profile_fn):
'''Initializes an instance of ProfileValidator.'''
profile = self._open_profile(profile_fn)
schema = self._parse_profile(profile)
super(ProfileValidator, self).__init__(schematron=schema)
def _build_rule_dict(self, worksheet):
'''Builds a dictionary representation of the rules defined by a STIX profile document.'''
d = defaultdict(list)
for i in xrange(1, worksheet.nrows):
if not any(self._get_cell_value(worksheet, i, x) for x in xrange(0, worksheet.ncols)): # empty row
continue
if not self._get_cell_value(worksheet, i, 1): # assume this is a label row
context = self._get_cell_value(worksheet, i, 0)
continue
field = self._get_cell_value(worksheet, i, 0)
occurrence = self._get_cell_value(worksheet, i, 1).lower()
xsi_types = self._get_cell_value(worksheet, i, 3)
allowed_values = self._get_cell_value(worksheet, i, 4)
list_xsi_types = [x.strip() for x in xsi_types.split(',')] if xsi_types else []
list_allowed_values = [x.strip() for x in allowed_values.split(',')] if allowed_values else []
if occurrence in ('required', 'prohibited') or len(list_xsi_types) > 0 or len(list_allowed_values) > 0: # ignore rows with no rules
d[context].append({'field' : field,
'occurrence' : occurrence,
'xsi_types' : list_xsi_types,
'allowed_values' : list_allowed_values})
return d
def _add_root_test(self, pattern, nsmap):
'''Adds a root-level test that requires the root element of a STIX
document be a STIX_Package'''
ns_stix = "http://stix.mitre.org/stix-1"
rule_element = self._add_element(pattern, "rule", context="/")
text = "The root element must be a STIX_Package instance"
test = "%s:STIX_Package" % nsmap.get(ns_stix, 'stix')
element = etree.XML('''<assert xmlns="%s" test="%s" role="error">%s [<value-of select="saxon:line-number()"/>]</assert> ''' % (self.NS_SCHEMATRON, test, text))
rule_element.append(element)
def _add_required_test(self, rule_element, entity_name, context):
'''Adds a test to the rule element checking for the presence of a required STIX field.'''
entity_path = "%s/%s" % (context, entity_name)
text = "%s is required by this profile" % (entity_path)
test = entity_name
element = etree.XML('''<assert xmlns="%s" test="%s" role="error">%s [<value-of select="saxon:line-number()"/>]</assert> ''' % (self.NS_SCHEMATRON, test, text))
rule_element.append(element)
def _add_prohibited_test(self, rule_element, entity_name, context):
'''Adds a test to the rule element checking for the presence of a prohibited STIX field.'''
entity_path = "%s/%s" % (context, entity_name) if entity_name.startswith("@") else context
text = "%s is prohibited by this profile" % (entity_path)
test_field = entity_name if entity_name.startswith("@") else "true()"
element = etree.XML('''<report xmlns="%s" test="%s" role="error">%s [<value-of select="saxon:line-number()"/>]</report> ''' % (self.NS_SCHEMATRON, test_field, text))
rule_element.append(element)
def _add_allowed_xsi_types_test(self, rule_element, context, entity_name, allowed_xsi_types):
'''Adds a test to the rule element which corresponds to values found in the Allowed Implementations
column of a STIX profile document.'''
entity_path = "%s/%s" % (context, entity_name)
if allowed_xsi_types:
test = " or ".join("@xsi:type='%s'" % (x) for x in allowed_xsi_types)
text = 'The allowed xsi:types for %s are %s' % (entity_path, allowed_xsi_types)
element = etree.XML('''<assert xmlns="%s" test="%s" role="error">%s [<value-of select="saxon:line-number()"/>]</assert> ''' % (self.NS_SCHEMATRON, test, text))
rule_element.append(element)
def _add_allowed_values_test(self, rule_element, context, entity_name, allowed_values):
'''Adds a test to the rule element corresponding to values found in the Allowed Values
column of a STIX profile document.
'''
entity_path = "%s/%s" % (context, entity_name)
text = "The allowed values for %s are %s" % (entity_path, allowed_values)
if entity_name.startswith('@'):
test = " or ".join("%s='%s'" % (entity_name, x) for x in allowed_values)
else:
test = " or ".join(".='%s'" % (x) for x in allowed_values)
element = etree.XML('''<assert xmlns="%s" test="%s" role="error">%s [<value-of select="saxon:line-number()"/>]</assert> ''' % (self.NS_SCHEMATRON, test, text))
rule_element.append(element)
def _create_rule_element(self, context):
'''Returns an etree._Element representation of a Schematron rule element.'''
rule = etree.Element("{%s}rule" % self.NS_SCHEMATRON)
rule.set('context', context)
return rule
def _add_rules(self, pattern_element, selectors, field_ns, tests):
'''Adds all Schematron rules and tests to the overarching Schematron
<pattern> element. Each rule and test corresponds to entries found
in the STIX profile document.
'''
d_rules = {} # context : rule_element
for selector in selectors:
for d_test in tests:
field = d_test['field']
occurrence = d_test['occurrence']
allowed_values = d_test['allowed_values']
allowed_xsi_types = d_test['xsi_types']
if field.startswith("@"):
entity_name = field
else:
entity_name = "%s:%s" % (field_ns, field)
if occurrence == "required":
ctx = selector
rule = d_rules.setdefault(ctx, self._create_rule_element(ctx))
self._add_required_test(rule, entity_name, ctx)
elif occurrence == "prohibited":
if entity_name.startswith("@"):
ctx = selector
else:
ctx = "%s/%s" % (selector, entity_name)
rule = d_rules.setdefault(ctx, self._create_rule_element(ctx))
self._add_prohibited_test(rule, entity_name, ctx)
if allowed_values or allowed_xsi_types:
if entity_name.startswith('@'):
ctx = selector
else:
ctx = "%s/%s" % (selector, entity_name)
rule = d_rules.setdefault(ctx, self._create_rule_element(ctx))
if allowed_values:
self._add_allowed_values_test(rule, selector, entity_name, allowed_values)
if allowed_xsi_types:
self._add_allowed_xsi_types_test(rule, selector, entity_name, allowed_xsi_types)
for rule in d_rules.itervalues():
pattern_element.append(rule)
def _build_schematron_xml(self, rules, nsmap, instance_map):
'''Returns an etree._Element instance representation of the STIX profile'''
root = etree.Element("{%s}schema" % self.NS_SCHEMATRON, nsmap={None:self.NS_SCHEMATRON})
pattern = self._add_element(root, "pattern", id="STIX_Schematron_Profile")
self._add_root_test(pattern, nsmap) # check the root element of the document
for label, tests in rules.iteritems():
d_instances = instance_map[label]
selectors = d_instances['selectors']
field_ns_alias = d_instances['ns_alias']
self._add_rules(pattern, selectors, field_ns_alias, tests)
self._map_ns(root, nsmap) # add namespaces to the schematron document
return root
def _parse_namespace_worksheet(self, worksheet):
'''Parses the Namespaces worksheet of the profile. Returns a dictionary representation:
d = { <namespace> : <namespace alias> }
By default, entries for http://stix.mitre.org/stix-1 and http://icl.com/saxon are added.
'''
nsmap = {self.NS_STIX : 'stix',
self.NS_SAXON : 'saxon'}
for i in xrange(1, worksheet.nrows): # skip the first row
if not any(self._get_cell_value(worksheet, i, x) for x in xrange(0, worksheet.ncols)): # empty row
continue
ns = self._get_cell_value(worksheet, i, 0)
alias = self._get_cell_value(worksheet, i, 1)
if not (ns or alias):
raise Exception("Missing namespace or alias: unable to parse Namespaces worksheet")
nsmap[ns] = alias
return nsmap
def _parse_instance_mapping_worksheet(self, worksheet, nsmap):
'''Parses the supplied Instance Mapping worksheet and returns a dictionary representation.
d0 = { <STIX type label> : d1 }
d1 = { 'selectors' : XPath selectors to instances of the XML datatype',
'ns' : The namespace where the STIX type is defined,
'ns_alias' : The namespace alias associated with the namespace }
'''
instance_map = {}
for i in xrange(1, worksheet.nrows):
if not any(self._get_cell_value(worksheet, i, x) for x in xrange(0, worksheet.ncols)): # empty row
continue
label = self._get_cell_value(worksheet, i, 0)
selectors = [x.strip() for x in self._get_cell_value(worksheet, i, 1).split(",")]
ns = self._get_cell_value(worksheet, i, 2)
ns_alias = nsmap[ns]
if not (label or selectors or ns):
raise Exception("Missing label, instance selector and/or namespace for %s in Instance Mapping worksheet" % label)
instance_map[label] = {'selectors':selectors, 'ns':ns, 'ns_alias':ns_alias}
return instance_map
def _parse_profile(self, profile):
'''Converts the supplied STIX profile into a Schematron representation. The
Schematron schema is returned as a etree._Element instance.
'''
overview_ws = profile.sheet_by_name("Overview")
namespace_ws = profile.sheet_by_name("Namespaces")
instance_mapping_ws = profile.sheet_by_name("Instance Mapping")
all_rules = defaultdict(list)
for worksheet in profile.sheets():
if worksheet.name not in ("Overview", "Namespaces", "Instance Mapping"):
rules = self._build_rule_dict(worksheet)
for context,d in rules.iteritems():
all_rules[context].extend(d)
namespaces = self._parse_namespace_worksheet(namespace_ws)
instance_mapping = self._parse_instance_mapping_worksheet(instance_mapping_ws, namespaces)
schema = self._build_schematron_xml(all_rules, namespaces, instance_mapping)
self._unload_workbook(profile)
return schema
def _map_ns(self, schematron, nsmap):
'''Adds <ns> nodes to the supplied schematron document for each entry
supplied by the nsmap.
'''
for ns, prefix in nsmap.iteritems():
ns_element = etree.Element("{%s}ns" % self.NS_SCHEMATRON)
ns_element.set("prefix", prefix)
ns_element.set("uri", ns)
schematron.insert(0, ns_element)
def _add_element(self, node, name, text=None, **kwargs):
'''Adds an etree._Element child to the supplied node. The child node is returned'''
child = etree.SubElement(node, "{%s}%s" % (self.NS_SCHEMATRON, name))
if text:
child.text = text
for k,v in kwargs.iteritems():
child.set(k, v)
return child
def _unload_workbook(self, workbook):
'''Unloads the xlrd workbook.'''
for worksheet in workbook.sheets():
workbook.unload_sheet(worksheet.name)
def _get_cell_value(self, worksheet, row, col):
'''Returns the worksheet cell value found at (row,col).'''
if not worksheet:
raise Exception("worksheet value was NoneType")
value = str(worksheet.cell_value(row, col))
return value
def _convert_to_string(self, value):
'''Returns the str(value) or an 8-bit string version of value encoded as UTF-8.'''
if isinstance(value, unicode):
return value.encode("UTF-8")
else:
return str(value)
def _open_profile(self, filename):
'''Returns xlrd.open_workbook(filename) or raises an Exception if the
filename extension is not .xlsx or the open_workbook() call fails.
'''
if not filename.lower().endswith(".xlsx"):
raise Exception("File must have .XLSX extension. Filename provided: %s" % filename)
try:
return xlrd.open_workbook(filename)
        except Exception:
raise Exception("File does not seem to be valid XLSX.")
def validate(self, instance_doc):
'''Validates an XML instance document against a STIX profile.'''
return super(ProfileValidator, self).validate(instance_doc, report_line_numbers=False)
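    # A minimal usage sketch (file names are illustrative); the profile must
    # be an .xlsx workbook with Overview, Namespaces and Instance Mapping
    # worksheets as described above:
    #
    #     profile_validator = ProfileValidator("stix_profile.xlsx")
    #     result = profile_validator.validate("document.xml")
    #     schematron_tree = profile_validator.get_schematron()   # lxml ElementTree
    #     xslt_tree = profile_validator.get_xslt()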
def _build_error_dict(self, errors, instance_doc, report_line_numbers=False):
'''Overrides SchematronValidator._build_error_dict(...).
Returns a dictionary representation of the SVRL validation report:
        d0 = { <Schematron error message> : d1 }
d1 = { "locations" : A list of XPaths to context nodes,
"line_numbers" : A list of line numbers where the error occurred,
"test" : The Schematron evaluation expression used,
"text" : The Schematron error message }
'''
d_errors = {}
for error in errors:
text = error.find("{%s}text" % self.NS_SVRL).text
location = error.attrib.get('location')
test = error.attrib.get('test')
line_number = text.split(" ")[-1][1:-1]
text = text[:text.rfind(' [')]
if text in d_errors:
d_errors[text]['locations'].append(location)
d_errors[text]['line_numbers'].append(line_number)
else:
d_errors[text] = {'locations':[location], 'test':test, 'nsmap':error.nsmap, 'text':text, 'line_numbers':[line_number]}
return d_errors
def get_xslt(self):
'''Overrides SchematronValidator.get_xslt()
Returns an lxml.etree._ElementTree representation of the ISO Schematron skeleton generated
XSLT translation of a STIX profile.
The ProfileValidator uses the extension function saxon:line-number() for reporting line numbers.
This function is stripped along with any references to the Saxon namespace from the exported
XSLT. This is due to compatibility issues between Schematron/XSLT processing libraries. For
example, SaxonPE/EE expects the Saxon namespace to be "http://saxon.sf.net/" while libxslt
expects it to be "http://icl.com/saxon". The freely distributed SaxonHE library does not support
Saxon extension functions at all.
'''
if not self.schematron:
return None
s = etree.tostring(self.schematron.validator_xslt)
s = s.replace(' [<axsl:text/><axsl:value-of select="saxon:line-number()"/><axsl:text/>]', '')
s = s.replace('xmlns:saxon="http://icl.com/saxon"', '')
s = s.replace('<svrl:ns-prefix-in-attribute-values uri="http://icl.com/saxon" prefix="saxon"/>', '')
return etree.ElementTree(etree.fromstring(s))
def get_schematron(self):
'''Overrides SchematronValidator.get_schematron()
Returns an lxml.etree._ElementTree representation of the ISO Schematron translation of a STIX profile.
The ProfileValidator uses the extension function saxon:line-number() for reporting line numbers.
This function is stripped along with any references to the Saxon namespace from the exported
XSLT. This is due to compatibility issues between Schematron/XSLT processing libraries. For
example, SaxonPE/EE expects the Saxon namespace to be "http://saxon.sf.net/" while libxslt
expects it to be "http://icl.com/saxon". The freely distributed SaxonHE library does not support
Saxon extension functions at all.
'''
if not self.schematron:
return None
s = etree.tostring(self.schematron.schematron)
s = s.replace(' [<value-of select="saxon:line-number()"/>]', '')
s = s.replace('<ns prefix="saxon" uri="http://icl.com/saxon"/>', '')
return etree.ElementTree(etree.fromstring(s)) | 0x0mar/conpot | conpot/tests/helpers/mitre_stix_validator.py | Python | gpl-2.0 | 41,024 |
import pymysql
import warnings
class DBException(Exception):
def __init__(self, msg=""):
Exception.__init__(self, msg)
class PySECO_DB():
def __init__(self, pyseco, host, username, password, db):
self.pyseco = pyseco
try:
self.conn = pymysql.connect(
host=host, user=username,
password=password, db=db, autocommit=False)
self.setup()
except pymysql.err.OperationalError:
raise DBException("connection failed")
except Exception as e:
raise e
def setup(self):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("error")
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS player
(id INT UNSIGNED NOT NULL AUTO_INCREMENT,
login VARCHAR(64) NOT NULL,
nickname VARCHAR(128) NOT NULL,
PRIMARY KEY(id),
UNIQUE KEY(login))""")
except pymysql.err.Warning as e:
pass
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS map
(id INT UNSIGNED NOT NULL AUTO_INCREMENT,
uid VARCHAR(64) NOT NULL,
name VARCHAR(128) NOT NULL,
author VARCHAR(64) NOT NULL,
num_cp SMALLINT NOT NULL,
authortime INT NOT NULL,
PRIMARY KEY(id),
UNIQUE KEY(uid))""")
except pymysql.err.Warning as e:
pass
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS record
(pid INT UNSIGNED NOT NULL,
mid INT UNSIGNED NOT NULL,
time INT UNSIGNED NOT NULL,
timestamp BIGINT UNSIGNED NOT NULL,
PRIMARY KEY(pid,mid),
CONSTRAINT fk_playerRecord FOREIGN KEY (pid)
REFERENCES player(id)
ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT fk_mapRecord FOREIGN KEY (mid)
REFERENCES map(id)
ON UPDATE CASCADE ON DELETE CASCADE)""")
except pymysql.err.Warning as e:
pass
try:
cur.execute("""
CREATE TABLE IF NOT EXISTS cp
(pid INT UNSIGNED NOT NULL,
mid INT UNSIGNED NOT NULL,
cpnum INT UNSIGNED NOT NULL,
time INT UNSIGNED NOT NULL,
PRIMARY KEY(pid, mid, cpnum),
CONSTRAINT fk_playerCp FOREIGN KEY (pid)
REFERENCES player(id)
ON UPDATE CASCADE ON DELETE CASCADE,
CONSTRAINT fk_mapCp FOREIGN KEY (mid)
REFERENCES map(id)
ON UPDATE CASCADE ON DELETE CASCADE)""")
except pymysql.err.Warning as e:
pass
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
def close(self):
try:
self.conn.commit()
self.conn.close()
except Exception as e:
pass
def add_player(self, login, nickname):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
        cur.execute("SELECT id FROM player WHERE login = %s LIMIT 1", (login,))
data = cur.fetchone()
if data is None:
cur.execute(
"INSERT INTO player (login, nickname) VALUES (%s, %s)",
(login, nickname))
cur.execute("SELECT last_insert_id()")
data = cur.fetchone()
if data is None:
raise DBException("Failed to create/load player")
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
return(data[0])
def add_map(self, uid, name, author, num_cp, authortime):
name = name.encode("unicode_escape")
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
        cur.execute("SELECT id FROM map WHERE uid = %s LIMIT 1", (uid,))
data = cur.fetchone()
if data is None:
cur.execute("""
INSERT INTO map
(uid, name, author, num_cp, authortime)
VALUES (%s, %s, %s, %s, %s)""",
(uid, name, author, num_cp, authortime))
cur.execute("SELECT last_insert_id()")
data = cur.fetchone()
if data is None:
raise DBException("Failed to create/load map")
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
return(data[0])
def get_record(self, mid, pid):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
cur.execute("""
SELECT time FROM record
WHERE pid = %s AND mid = %s LIMIT 1""",
(pid, mid))
data = cur.fetchone()
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
if data is None:
return None
else:
return data[0]
def get_record_by_login(self, mid, login):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
cur.execute("""
SELECT time FROM record, player
WHERE record.pid = player.id
AND mid = %s
AND player.login = %s
LIMIT 1""",
(mid, login))
data = cur.fetchone()
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
if data is None:
return None
else:
return data[0]
def get_cp_times(self, mid, login):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
cur.execute("""
SELECT cpnum, time FROM player, cp
WHERE cp.pid = player.id
AND mid = %s
AND player.login = %s
ORDER BY cpnum ASC""",
(mid, login))
data = cur.fetchall()
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
if data is None:
return None
else:
return data
def get_record_list(self, mid, login):
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
        # Params: Map ID x3, Player Login, Map ID x3.
        # The query returns the top 3 records for this map plus a window of
        # about 11 ranks around the given player's record (or the tail of
        # the top 200 when the player has no record on this map).
cur.execute("""
(SELECT (@row_number := @row_number + 1) AS rank, top.*
FROM (SELECT record.time, player.login, player.nickname
FROM record,
player
WHERE record.pid = player.id
AND record.mid = %s
ORDER BY record.time, record.timestamp ASC
LIMIT 3) top,
(SELECT @row_number := 0) r)
UNION
(SELECT new.* FROM
(SELECT (@row_number:=@row_number + 1) AS rank,
record.time, player.login, player.nickname
FROM record,
(SELECT @row_number := 0) r,
player
WHERE record.mid = %s
AND record.pid = player.id
ORDER BY record.time, record.timestamp ASC
LIMIT 200) new,
(SELECT COUNT(*) as count, target_rank FROM
(SELECT (@row_number:=@row_number + 1) AS target_rank,
record.mid,
record.pid,
record.time,
record.timestamp
FROM record,
(SELECT @row_number := 0) r
WHERE record.mid = %s
ORDER BY record.time, record.timestamp ASC
LIMIT 200) t,
player
WHERE player.id = t.pid
AND player.login = %s) x
WHERE (x.count = 1 AND rank >= LEAST(
GREATEST(4,
(SELECT COUNT(*)
FROM record
WHERE record.mid = %s)-10),
200-10,
GREATEST(x.target_rank-5 , 4))
AND rank <= LEAST(
GREATEST(4,
(SELECT COUNT(*)
FROM record
WHERE record.mid = %s)),
200,
GREATEST(x.target_rank-5,4)+10))
OR (x.count != 1 AND rank >= GREATEST(
4,
LEAST(
200,
(SELECT COUNT(*)
FROM record
WHERE record.mid = %s)
)-9)));""",
(mid, mid, mid, login, mid, mid, mid))
data = cur.fetchall()
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
out = []
for elem in data:
out.append(elem[:3] +
(elem[3].encode().decode("unicode_escape"), ))
return out
    # Returns a tuple (prev_rank, prev_time, new_rank, new_time); prev_rank
    # and prev_time are None when the player had no previous record on this map.
def handle_record(self, mid, login, time, timestamp, cps):
retval = -1
self.pyseco.db_lock.acquire()
cur = self.conn.cursor()
cur.execute("""
SELECT (SELECT rank
FROM (SELECT (@row_number:=@row_number + 1) AS rank,
player.login
FROM record,
(SELECT @row_number := 0) r,
player
WHERE record.mid = %s
AND record.pid = player.id
ORDER BY record.time, record.timestamp ASC
LIMIT 200) ranks
WHERE login = %s) AS rank,
time,
id
FROM record,player
WHERE mid = %s AND pid = player.id AND login = %s LIMIT 1""",
(mid, login, mid, login))
data = cur.fetchone()
if data is None:
prev_rank = None
prev_time = None
cur.execute("""
INSERT INTO record (mid,pid,time,timestamp)
VALUES (%s,
(SELECT id FROM player WHERE login = %s LIMIT 1),
%s, %s)""",
(mid, login, time, timestamp))
i = 0
for cp_time in cps:
cur.execute("""
INSERT INTO cp (mid,pid,cpnum,time)
VALUES (%s,
(SELECT id FROM player WHERE login = %s LIMIT 1),
%s, %s)
ON DUPLICATE KEY UPDATE time = %s
""",
(mid, login, i, cp_time, cp_time))
i += 1
new_time = time
elif time < data[1]:
prev_rank = data[0]
prev_time = data[1]
cur.execute("""
UPDATE record
SET time = %s,
timestamp = %s
WHERE mid = %s
AND pid = (SELECT id FROM player WHERE login = %s LIMIT 1)
LIMIT 1""",
(time, timestamp, mid, login))
i = 0
for cp_time in cps:
cur.execute("""
INSERT INTO cp (mid,pid,cpnum,time)
VALUES (%s,
(SELECT id FROM player WHERE login = %s LIMIT 1),
%s, %s)
ON DUPLICATE KEY UPDATE time = %s
""",
(mid, login, i, cp_time, cp_time))
i += 1
new_time = time
else:
prev_rank = data[0]
prev_time = data[1]
new_time = data[1]
cur.execute("""
SELECT (SELECT rank
FROM (SELECT (@row_number:=@row_number + 1) AS rank,
player.login
FROM record,
(SELECT @row_number := 0) r,
player
WHERE record.mid = %s
AND record.pid = player.id
ORDER BY record.time, record.timestamp ASC
LIMIT 200) ranks
WHERE login = %s) AS rank
FROM record,player
WHERE mid = %s AND pid = player.id AND login = %s LIMIT 1""",
(mid, login, mid, login))
data = cur.fetchone()
if data is None:
raise DBException("Failed to handle record")
new_rank = data[0]
cur.close()
self.conn.commit()
self.pyseco.db_lock.release()
return (prev_rank, prev_time, new_rank, new_time)
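# A minimal usage sketch (connection details and times are illustrative); the
# first argument must expose a db_lock lock, just as the real pyseco object does:
#
#     import threading
#     class FakePyseco(object):
#         db_lock = threading.Lock()
#     db = PySECO_DB(FakePyseco(), "localhost", "pyseco", "secret", "pyseco")
#     pid = db.add_player("player_login", "NickName")
#     mid = db.add_map("uid123", "Example Map", "author_login", 5, 45210)
#     prev_rank, prev_time, new_rank, new_time = db.handle_record(
#         mid, "player_login", 44800, 1500000000,
#         [10200, 21500, 33100, 40000, 44800])
#     db.close()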
| Hakuba/pyseco | src/db.py | Python | gpl-3.0 | 13,648 |
import logging
logger = logging.getLogger('superdesk')
logger.setLevel(logging.INFO)
LOGGING_TEMPLATE = "\033[1m\033[03{color}mVerifiedPixel: \033[0m{msg}"
def info(msg):
return logger.info(LOGGING_TEMPLATE.format(msg=msg, color=6))
def warning(msg):
return logger.warning(LOGGING_TEMPLATE.format(msg=msg, color=3))
def error(msg):
return logger.error(LOGGING_TEMPLATE.format(msg=msg, color=1))
def success(msg):
return logger.info(LOGGING_TEMPLATE.format(msg=msg, color=2))
def debug(msg):
return logger.info(LOGGING_TEMPLATE.format(
msg="DEBUG: {msg}".format(msg=msg), color=4
))
def print_task_exception(f):
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
debug(e)
raise(e)
return wrapped
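# A minimal, self-contained demo of the helpers above; it only runs when the
# module is executed directly and assumes nothing beyond logging.basicConfig.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    @print_task_exception
    def failing_task():
        raise ValueError("example failure")
    info("starting demo task")
    try:
        failing_task()
    except ValueError:
        warning("task failed as expected")
    success("demo finished")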
| thnkloud9/verifiedpixel | server/vpp/verifiedpixel/logging.py | Python | agpl-3.0 | 836 |
# -*- coding: utf-8 -*-
# Copyright 2020 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2 import IntegrityError
from odoo.tests.common import TransactionCase
class TestCarrierCategory(TransactionCase):
def test_code_unique(self):
vals = {
"name": "Drop-Off 2",
"code": "DROP",
}
with self.assertRaises(IntegrityError):
self.env["delivery.carrier.category"].create(vals)
def test_code_unique_archived(self):
drop_off = self.env.ref(
"delivery_carrier_category.delivery_carrier_category_dropoff")
drop_off.active = False
vals = {
"name": "Drop-Off 2",
"code": "DROP",
}
self.env["delivery.carrier.category"].create(vals)
| OCA/carrier-delivery | delivery_carrier_category/tests/test_carrier_category.py | Python | agpl-3.0 | 835 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import sys
from shutil import copytree, copy, rmtree
from setuptools import setup
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
LIB_TEMP_PATH = os.path.join(TEMP_PATH, "lib")
OPT_TEMP_PATH = os.path.join(TEMP_PATH, "opt")
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
LICENSES_TEMP_PATH = os.path.join(TEMP_PATH, "licenses")
PLUGINS_TEMP_PATH = os.path.join(TEMP_PATH, "plugins")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join("pyflink", "LICENSE")
NOTICE_FILE_TEMP_PATH = os.path.join("pyflink", "NOTICE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
# Due to changes in FLINK-14008, the licenses directory and NOTICE file may not exist in
# the build-target folder. Just ignore them in this case.
exist_licenses = None
exist_notice = None
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
FLINK_HOME = os.path.abspath("../build-target")
incorrect_invocation_message = """
If you are installing pyflink from flink source, you must first build Flink and
run sdist.
To build Flink with maven you can run:
mvn -DskipTests clean package
Building the source dist is done in the flink-python directory:
cd flink-python
python setup.py sdist
pip install dist/*.tar.gz"""
LIB_PATH = os.path.join(FLINK_HOME, "lib")
OPT_PATH = os.path.join(FLINK_HOME, "opt")
CONF_PATH = os.path.join(FLINK_HOME, "conf")
EXAMPLES_PATH = os.path.join(FLINK_HOME, "examples")
LICENSES_PATH = os.path.join(FLINK_HOME, "licenses")
PLUGINS_PATH = os.path.join(FLINK_HOME, "plugins")
SCRIPTS_PATH = os.path.join(FLINK_HOME, "bin")
LICENSE_FILE_PATH = os.path.join(FLINK_HOME, "LICENSE")
NOTICE_FILE_PATH = os.path.join(FLINK_HOME, "NOTICE")
README_FILE_PATH = os.path.join(FLINK_HOME, "README.txt")
exist_licenses = os.path.exists(LICENSES_PATH)
exist_notice = os.path.exists(NOTICE_FILE_PATH)
if not os.path.isdir(LIB_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
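        # Prefer symlinking the build-target directories when the platform
        # supports it (no duplication while building the sdist); otherwise fall
        # back to copying the trees in the else branch below.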
if getattr(os, "symlink", None) is not None:
os.symlink(LIB_PATH, LIB_TEMP_PATH)
os.symlink(OPT_PATH, OPT_TEMP_PATH)
os.symlink(CONF_PATH, CONF_TEMP_PATH)
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
if exist_licenses:
os.symlink(LICENSES_PATH, LICENSES_TEMP_PATH)
os.symlink(PLUGINS_PATH, PLUGINS_TEMP_PATH)
os.symlink(SCRIPTS_PATH, SCRIPTS_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
if exist_notice:
os.symlink(NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
else:
copytree(LIB_PATH, LIB_TEMP_PATH)
copytree(OPT_PATH, OPT_TEMP_PATH)
copytree(CONF_PATH, CONF_TEMP_PATH)
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
if exist_licenses:
copytree(LICENSES_PATH, LICENSES_TEMP_PATH)
copytree(PLUGINS_PATH, PLUGINS_TEMP_PATH)
copytree(SCRIPTS_PATH, SCRIPTS_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
if exist_notice:
copy(NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
else:
if not os.path.isdir(LIB_TEMP_PATH) or not os.path.isdir(OPT_TEMP_PATH) \
or not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
exist_licenses = os.path.exists(LICENSES_TEMP_PATH)
exist_notice = os.path.exists(NOTICE_FILE_TEMP_PATH)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.dataset',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.lib',
'pyflink.opt',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.plugins',
'pyflink.bin']
PACKAGE_DIR = {
'pyflink.lib': TEMP_PATH + '/lib',
'pyflink.opt': TEMP_PATH + '/opt',
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.plugins': TEMP_PATH + '/plugins',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['LICENSE', 'README.txt'],
'pyflink.lib': ['*.jar'],
'pyflink.opt': ['*.*', '*/*'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.plugins': ['*', '*/*'],
'pyflink.bin': ['*']}
if exist_licenses:
PACKAGES.append('pyflink.licenses')
PACKAGE_DIR['pyflink.licenses'] = TEMP_PATH + '/licenses'
PACKAGE_DATA['pyflink.licenses'] = ['*']
if exist_notice:
PACKAGE_DATA['pyflink'].append('NOTICE')
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Flink Developers',
author_email='[email protected]',
install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.15.0',
'cloudpickle==1.2.2'],
tests_require=['pytest==4.4.1'],
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7']
)
finally:
if in_flink_source:
if getattr(os, "symlink", None) is not None:
os.remove(LIB_TEMP_PATH)
os.remove(OPT_TEMP_PATH)
os.remove(CONF_TEMP_PATH)
os.remove(EXAMPLES_TEMP_PATH)
if exist_licenses:
os.remove(LICENSES_TEMP_PATH)
os.remove(PLUGINS_TEMP_PATH)
os.remove(SCRIPTS_TEMP_PATH)
os.remove(LICENSE_FILE_TEMP_PATH)
if exist_notice:
os.remove(NOTICE_FILE_TEMP_PATH)
os.remove(README_FILE_TEMP_PATH)
else:
rmtree(LIB_TEMP_PATH)
rmtree(OPT_TEMP_PATH)
rmtree(CONF_TEMP_PATH)
rmtree(EXAMPLES_TEMP_PATH)
if exist_licenses:
rmtree(LICENSES_TEMP_PATH)
rmtree(PLUGINS_TEMP_PATH)
rmtree(SCRIPTS_TEMP_PATH)
os.remove(LICENSE_FILE_TEMP_PATH)
if exist_notice:
os.remove(NOTICE_FILE_TEMP_PATH)
os.remove(README_FILE_TEMP_PATH)
rmtree(LOG_TEMP_PATH)
os.rmdir(TEMP_PATH)
| mbode/flink | flink-python/setup.py | Python | apache-2.0 | 10,045 |
from datetime import datetime, time, timedelta, tzinfo
from typing import Optional, Union, cast
import warnings
import numpy as np
from pandas._libs import lib, tslib
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays._ranges import generate_regular_range
import pandas.core.common as com
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import BDay, Day, Tick
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
def _field_accessor(name, field, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
            # these return a boolean by definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
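# The factory above is used further down to generate the vectorized field
# accessors on DatetimeArray (e.g. ``year = _field_accessor("year", "Y", ...)``),
# with the per-element work delegated to pandas._libs.tslibs.fields.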
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. versionadded:: 0.24.0
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops = ["freq", "tz"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops = ["date", "time", "timetz"]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: Union[np.dtype, DatetimeTZDtype]
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False):
if isinstance(values, (ABCSeries, ABCIndexClass)):
values = values._values
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._data
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
self._data = values
self._dtype = dtype
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
@classmethod
def _simple_new(
cls, values, freq: Optional[BaseOffset] = None, dtype=DT64NS_DTYPE
) -> "DatetimeArray":
assert isinstance(values, np.ndarray)
if values.dtype != DT64NS_DTYPE:
assert values.dtype == "i8"
values = values.view(DT64NS_DTYPE)
result = object.__new__(cls)
result._data = values
result._freq = freq
result._dtype = dtype
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy=False,
tz=None,
freq=lib.no_default,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
closed=None,
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_closed, right_closed = dtl.validate_endpoints(closed)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
# tz info from start and day to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
index = cls._simple_new(
arr.astype("M8[ns]", copy=False), freq=None, dtype=dtype
)
if not left_closed and len(index) and index[0] == start:
# TODO: overload DatetimeLikeArrayMixin.__getitem__
index = cast(DatetimeArray, index[1:])
if not right_closed and len(index) and index[-1] == end:
# TODO: overload DatetimeLikeArrayMixin.__getitem__
index = cast(DatetimeArray, index[:-1])
dtype = tz_to_dtype(tz)
return cls._simple_new(index.asi8, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
if not isna(value):
self._check_compatible_with(value, setitem=setitem)
return value.asm8
return np.datetime64(value.value, "ns")
def _scalar_from_string(self, value):
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Union[Timestamp, NaTType]:
return Timestamp(x, freq=self.freq, tz=self.tz)
@property
def dtype(self) -> Union[np.dtype, DatetimeTZDtype]:
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self):
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = int(length / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy=True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_datetime64_ns_dtype(dtype) and not is_dtype_equal(dtype, self.dtype):
# GH#18951: datetime64_ns dtype but not equal means different tz
new_tz = getattr(dtype, "tz", None)
if getattr(self.dtype, "tz", None) is None:
return self.tz_localize(new_tz)
result = self.tz_convert(new_tz)
if copy:
result = result.copy()
if new_tz is None:
# Do we want .astype('datetime64[ns]') to be an ndarray.
# The astype in Block._astype expects this to return an
# ndarray, but we could maybe work around it there.
result = result._data
return result
elif is_datetime64tz_dtype(self.dtype) and is_dtype_equal(self.dtype, dtype):
if copy:
return self.copy()
return self
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep
).reshape(self.shape)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
        # determine whether `other` has the same timezone as this array
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other):
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
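        # checked_add_with_arr performs overflow-checked int64 addition; any
        # position where either operand is NaT (arr_mask) is reset to iNaT below.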
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
def _add_offset(self, offset):
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values)
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
        # subtract a datetime from myself, yielding an ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
if other is NaT:
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self):
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz):
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
        nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> np.ndarray:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self):
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases, when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
def to_perioddelta(self, freq):
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead",
FutureWarning,
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self):
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="date")
def isocalendar(self):
"""
Returns a DataFrame with the year, week, and day calculated according to
the ISO 8601 standard.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
with columns year, week and day
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)
>>> idx.isocalendar()
year week day
2019-12-29 2019 52 7
2019-12-30 2020 1 1
2019-12-31 2020 1 2
2020-01-01 2020 1 3
>>> idx.isocalendar().week
2019-12-29 52
2019-12-30 1
2019-12-31 1
2020-01-01 1
Freq: D, Name: week, dtype: UInt32
"""
from pandas import DataFrame
values = self._local_timestamps()
sarray = fields.build_isocalendar_sarray(values)
iso_calendar_df = DataFrame(
sarray, columns=["year", "week", "day"], dtype="UInt32"
)
if self._hasnans:
iso_calendar_df.iloc[self._isnan] = None
return iso_calendar_df
@property
def weekofyear(self):
"""
The week ordinal of the year.
.. deprecated:: 1.1.0
weekofyear and week have been deprecated.
Please use DatetimeIndex.isocalendar().week instead.
"""
warnings.warn(
"weekofyear and week have been deprecated, please use "
"DatetimeIndex.isocalendar().week instead, which returns "
"a Series. To exactly reproduce the behavior of week and "
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
)
week_series = self.isocalendar().week
if week_series.hasnans:
return week_series.to_numpy(dtype="float64", na_value=np.nan)
return week_series.to_numpy(dtype="int64")
week = weekofyear
year = _field_accessor(
"year",
"Y",
"""
The year of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="Y")
... )
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int64
""",
)
month = _field_accessor(
"month",
"M",
"""
The month as January=1, December=12.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="M")
... )
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int64
""",
)
day = _field_accessor(
"day",
"D",
"""
The day of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="D")
... )
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int64
""",
)
hour = _field_accessor(
"hour",
"h",
"""
The hours of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="h")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int64
""",
)
minute = _field_accessor(
"minute",
"m",
"""
The minutes of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="T")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int64
""",
)
second = _field_accessor(
"second",
"s",
"""
The seconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="s")
... )
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int64
""",
)
microsecond = _field_accessor(
"microsecond",
"us",
"""
The microseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="us")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int64
""",
)
nanosecond = _field_accessor(
"nanosecond",
"ns",
"""
The nanoseconds of the datetime.
Examples
--------
>>> datetime_series = pd.Series(
... pd.date_range("2000-01-01", periods=3, freq="ns")
... )
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int64
""",
)
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) or DatetimeIndex.
Returns
-------
Series or Index
Containing integers indicating the day number.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc)
dayofweek = day_of_week
weekday = day_of_week
day_of_year = _field_accessor(
"dayofyear",
"doy",
"""
The ordinal day of the year.
""",
)
dayofyear = day_of_year
quarter = _field_accessor(
"quarter",
"q",
"""
The quarter of the date.
""",
)
days_in_month = _field_accessor(
"days_in_month",
"dim",
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
"is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first")
)
is_month_end = _field_accessor(
"is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last")
)
is_quarter_start = _field_accessor(
"is_quarter_start",
"is_quarter_start",
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
    is_quarter_end : Similar property indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""",
)
is_quarter_end = _field_accessor(
"is_quarter_end",
"is_quarter_end",
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""",
)
is_year_start = _field_accessor(
"is_year_start",
"is_year_start",
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""",
)
is_year_end = _field_accessor(
"is_year_end",
"is_year_end",
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""",
)
is_leap_year = _field_accessor(
"is_leap_year",
"is_leap_year",
"""
Boolean indicator if the date belongs to a leap year.
    A leap year is a year which has 366 days (instead of 365), including
    the 29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False])
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""",
)
def to_julian_date(self):
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
        Julian date 0 is noon on January 1, 4713 BC.
https://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (
day
+ np.fix((153 * month - 457) / 5)
+ 365 * year
+ np.floor(year / 4)
- np.floor(year / 100)
+ np.floor(year / 400)
+ 1_721_118.5
+ (
self.hour
+ self.minute / 60.0
+ self.second / 3600.0
+ self.microsecond / 3600.0 / 1e6
+ self.nanosecond / 3600.0 / 1e9
)
/ 24.0
)
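    # Illustrative check (not part of the original source): for noon on
    # 2000-01-01 (the J2000.0 epoch) the formula above yields 2451545.0.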
# -----------------------------------------------------------------
# Reductions
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
):
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
tda = TimedeltaArray(self._ndarray.view("i8"))
return tda.std(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna
)
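    # Sketch of the underlying idea (not part of the original source): std is
    # translation-invariant, std(t_i) == std(t_i - t_0) for any fixed t_0, so
    # viewing the i8 values as timedeltas and taking their std gives the answer.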
# -------------------------------------------------------------------
# Constructor Helpers
def sequence_to_dt64ns(
data,
dtype=None,
copy=False,
tz=None,
dayfirst=False,
yearfirst=False,
ambiguous="raise",
):
"""
Parameters
----------
data : list-like
dtype : dtype, str, or None, default None
copy : bool, default False
tz : tzinfo, str, or None, default None
dayfirst : bool, default False
yearfirst : bool, default False
ambiguous : str, bool, or arraylike, default 'raise'
See pandas._libs.tslibs.tzconversion.tz_localize_to_utc.
Returns
-------
result : numpy.ndarray
The sequence converted to a numpy array with dtype ``datetime64[ns]``.
tz : tzinfo or None
Either the user-provided tzinfo or one inferred from the data.
inferred_freq : Tick or None
The inferred frequency of the sequence.
Raises
------
    TypeError : PeriodDtype data is passed
"""
inferred_freq = None
dtype = _validate_dt64_dtype(dtype)
tz = timezones.maybe_get_tz(tz)
if not hasattr(data, "dtype"):
# e.g. list, tuple
if np.ndim(data) == 0:
# i.e. generator
data = list(data)
data = np.asarray(data)
copy = False
elif isinstance(data, ABCSeries):
data = data._values
if isinstance(data, ABCPandasArray):
data = data.to_numpy()
if hasattr(data, "freq"):
# i.e. DatetimeArray/Index
inferred_freq = data.freq
# if dtype has an embedded tz, capture it
tz = validate_tz_from_dtype(dtype, tz)
if isinstance(data, ABCIndexClass):
if data.nlevels > 1:
# Without this check, data._data below is None
raise TypeError("Cannot create a DatetimeArray from a MultiIndex.")
data = data._data
# By this point we are assured to have either a numpy array or Index
data, copy = maybe_convert_dtype(data, copy)
data_dtype = getattr(data, "dtype", None)
if (
is_object_dtype(data_dtype)
or is_string_dtype(data_dtype)
or is_sparse(data_dtype)
):
# TODO: We do not have tests specific to string-dtypes,
# also complex or categorical or other extension
copy = False
if lib.infer_dtype(data, skipna=False) == "integer":
data = data.astype(np.int64)
else:
# data comes back here as either i8 to denote UTC timestamps
# or M8[ns] to denote wall times
data, inferred_tz = objects_to_datetime64ns(
data, dayfirst=dayfirst, yearfirst=yearfirst
)
if tz and inferred_tz:
# two timezones: convert to intended from base UTC repr
data = tzconversion.tz_convert_from_utc(data.view("i8"), tz)
data = data.view(DT64NS_DTYPE)
elif inferred_tz:
tz = inferred_tz
data_dtype = data.dtype
# `data` may have originally been a Categorical[datetime64[ns, tz]],
# so we need to handle these types.
if is_datetime64tz_dtype(data_dtype):
# DatetimeArray -> ndarray
tz = _maybe_infer_tz(tz, data.tz)
result = data._data
elif is_datetime64_dtype(data_dtype):
# tz-naive DatetimeArray or ndarray[datetime64]
data = getattr(data, "_data", data)
if data.dtype != DT64NS_DTYPE:
data = conversion.ensure_datetime64ns(data)
if tz is not None:
# Convert tz-naive to UTC
tz = timezones.maybe_get_tz(tz)
data = tzconversion.tz_localize_to_utc(
data.view("i8"), tz, ambiguous=ambiguous
)
data = data.view(DT64NS_DTYPE)
assert data.dtype == DT64NS_DTYPE, data.dtype
result = data
else:
# must be integer dtype otherwise
        # assume these are epoch timestamps
if tz:
tz = timezones.maybe_get_tz(tz)
if data.dtype != INT64_DTYPE:
data = data.astype(np.int64, copy=False)
result = data.view(DT64NS_DTYPE)
if copy:
# TODO: should this be deepcopy?
result = result.copy()
assert isinstance(result, np.ndarray), type(result)
assert result.dtype == "M8[ns]", result.dtype
# We have to call this again after possibly inferring a tz above
validate_tz_from_dtype(dtype, tz)
return result, tz, inferred_freq
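# Illustrative sketch (not part of pandas): a typical round trip through the
# helper above, assuming plain ISO-8601 strings and no timezone.
#
#   >>> result, tz, freq = sequence_to_dt64ns(["2019-01-01", "2019-01-02"])
#   >>> result.dtype, tz, freq
#   (dtype('<M8[ns]'), None, None)
#
# Passing tz="UTC" instead localizes the wall times and returns that tz.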
def objects_to_datetime64ns(
data,
dayfirst,
yearfirst,
utc=False,
errors="raise",
require_iso8601=False,
allow_object=False,
):
"""
Convert data to array of timestamps.
Parameters
----------
data : np.ndarray[object]
dayfirst : bool
yearfirst : bool
utc : bool, default False
Whether to convert timezone-aware timestamps to UTC.
errors : {'raise', 'ignore', 'coerce'}
require_iso8601 : bool, default False
allow_object : bool
Whether to return an object-dtype ndarray instead of raising if the
data contains more than one timezone.
Returns
-------
result : ndarray
np.int64 dtype if returned values represent UTC timestamps
np.datetime64[ns] if returned values represent wall times
object if mixed timezones
inferred_tz : tzinfo or None
Raises
------
ValueError : if data cannot be converted to datetimes
"""
assert errors in ["raise", "ignore", "coerce"]
# if str-dtype, convert
data = np.array(data, copy=False, dtype=np.object_)
try:
result, tz_parsed = tslib.array_to_datetime(
data,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601,
)
except ValueError as e:
try:
values, tz_parsed = conversion.datetime_to_datetime64(data)
# If tzaware, these values represent unix timestamps, so we
# return them as i8 to distinguish from wall times
return values.view("i8"), tz_parsed
except (ValueError, TypeError):
raise e
if tz_parsed is not None:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
# Return i8 values to denote unix timestamps
return result.view("i8"), tz_parsed
elif is_datetime64_dtype(result):
# returning M8[ns] denotes wall-times; since tz is None
# the distinction is a thin one
return result, tz_parsed
elif is_object_dtype(result):
# GH#23675 when called via `pd.to_datetime`, returning an object-dtype
# array is allowed. When called via `pd.DatetimeIndex`, we can
# only accept datetime64 dtype, so raise TypeError if object-dtype
# is returned, as that indicates the values can be recognized as
# datetimes but they have conflicting timezones/awareness
if allow_object:
return result, tz_parsed
raise TypeError(result)
else: # pragma: no cover
# GH#23675 this TypeError should never be hit, whereas the TypeError
# in the object-dtype branch above is reachable.
raise TypeError(result)
def maybe_convert_dtype(data, copy):
"""
Convert data based on dtype conventions, issuing deprecation warnings
or errors where appropriate.
Parameters
----------
data : np.ndarray or pd.Index
copy : bool
Returns
-------
data : np.ndarray or pd.Index
copy : bool
Raises
------
    TypeError : PeriodDtype data is passed
"""
if not hasattr(data, "dtype"):
# e.g. collections.deque
return data, copy
if is_float_dtype(data.dtype):
# Note: we must cast to datetime64[ns] here in order to treat these
# as wall-times instead of UTC timestamps.
data = data.astype(DT64NS_DTYPE)
copy = False
# TODO: deprecate this behavior to instead treat symmetrically
# with integer dtypes. See discussion in GH#23675
elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):
# GH#29794 enforcing deprecation introduced in GH#23539
raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
elif is_period_dtype(data.dtype):
# Note: without explicitly raising here, PeriodIndex
# test_setops.test_join_does_not_recur fails
raise TypeError(
"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
)
elif is_categorical_dtype(data.dtype):
# GH#18664 preserve tz in going DTI->Categorical->DTI
# TODO: cases where we need to do another pass through this func,
# e.g. the categories are timedelta64s
data = data.categories.take(data.codes, fill_value=NaT)._values
copy = False
elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype):
# Includes categorical
# TODO: We have no tests for these
data = np.array(data, dtype=np.object_)
copy = False
return data, copy
# -------------------------------------------------------------------
# Validation and Inference
def _maybe_infer_tz(
tz: Optional[tzinfo], inferred_tz: Optional[tzinfo]
) -> Optional[tzinfo]:
"""
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.
Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if both timezones are present but do not match
"""
if tz is None:
tz = inferred_tz
elif inferred_tz is None:
pass
elif not timezones.tz_compare(tz, inferred_tz):
raise TypeError(
f"data is already tz-aware {inferred_tz}, unable to "
f"set specified tz: {tz}"
)
return tz
def _validate_dt64_dtype(dtype):
"""
Check that a dtype, if passed, represents either a numpy datetime64[ns]
dtype or a pandas DatetimeTZDtype.
Parameters
----------
dtype : object
Returns
-------
dtype : None, numpy.dtype, or DatetimeTZDtype
Raises
------
ValueError : invalid dtype
Notes
-----
Unlike validate_tz_from_dtype, this does _not_ allow non-existent
tz errors to go through
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, np.dtype("M8")):
# no precision, disallowed GH#24806
msg = (
"Passing in 'datetime64' dtype with no precision is not allowed. "
"Please pass in 'datetime64[ns]' instead."
)
raise ValueError(msg)
if (isinstance(dtype, np.dtype) and dtype != DT64NS_DTYPE) or not isinstance(
dtype, (np.dtype, DatetimeTZDtype)
):
raise ValueError(
f"Unexpected value for 'dtype': '{dtype}'. "
"Must be 'datetime64[ns]' or DatetimeTZDtype'."
)
return dtype
def validate_tz_from_dtype(dtype, tz: Optional[tzinfo]) -> Optional[tzinfo]:
"""
If the given dtype is a DatetimeTZDtype, extract the implied
tzinfo object from it and check that it does not conflict with the given
tz.
Parameters
----------
dtype : dtype, str
tz : None, tzinfo
Returns
-------
tz : consensus tzinfo
Raises
------
ValueError : on tzinfo mismatch
"""
if dtype is not None:
if isinstance(dtype, str):
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
except TypeError:
# Things like `datetime64[ns]`, which is OK for the
# constructors, but also nonsense, which should be validated
# but not by us. We *do* allow non-existent tz errors to
# go through
pass
dtz = getattr(dtype, "tz", None)
if dtz is not None:
if tz is not None and not timezones.tz_compare(tz, dtz):
raise ValueError("cannot supply both a tz and a dtype with a tz")
tz = dtz
if tz is not None and is_datetime64_dtype(dtype):
# We also need to check for the case where the user passed a
# tz-naive dtype (i.e. datetime64[ns])
if tz is not None and not timezones.tz_compare(tz, dtz):
raise ValueError(
"cannot supply both a tz and a "
"timezone-naive dtype (i.e. datetime64[ns])"
)
return tz
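# Illustrative sketch (not part of pandas): how the validator above resolves a
# dtype-embedded timezone against an explicit one (assuming pytz is installed).
#
#   >>> import pytz
#   >>> validate_tz_from_dtype(DatetimeTZDtype(tz="UTC"), None)
#   <UTC>
#   >>> validate_tz_from_dtype(DatetimeTZDtype(tz="UTC"), pytz.timezone("US/Eastern"))
#   Traceback (most recent call last):
#   ValueError: cannot supply both a tz and a dtype with a tz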
def _infer_tz_from_endpoints(
start: Timestamp, end: Timestamp, tz: Optional[tzinfo]
) -> Optional[tzinfo]:
"""
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end)
except AssertionError as err:
# infer_tzinfo raises AssertionError if passed mismatched timezones
raise TypeError(
"Start and end cannot both be tz-aware with different timezones"
) from err
inferred_tz = timezones.maybe_get_tz(inferred_tz)
tz = timezones.maybe_get_tz(tz)
if tz is not None and inferred_tz is not None:
if not timezones.tz_compare(inferred_tz, tz):
raise AssertionError("Inferred time zone not equal to passed time zone")
elif inferred_tz is not None:
tz = inferred_tz
return tz
def _maybe_normalize_endpoints(
start: Optional[Timestamp], end: Optional[Timestamp], normalize: bool
):
_normalized = True
if start is not None:
if normalize:
start = start.normalize()
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = end.normalize()
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
return start, end, _normalized
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent):
"""
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
ambiguous: str, localization behavior for ambiguous times
nonexistent: str, localization behavior for nonexistent times
Returns
-------
ts : Timestamp
"""
# Make sure start and end are timezone localized if:
# 1) freq = a Timedelta-like frequency (Tick)
# 2) freq = None i.e. generating a linspaced range
if is_none is None and is_not_none is not None:
# Note: We can't ambiguous='infer' a singular ambiguous time; however,
# we have historically defaulted ambiguous=False
ambiguous = ambiguous if ambiguous != "infer" else False
localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None}
if isinstance(freq, Tick) or freq is None:
localize_args["tz"] = tz
ts = ts.tz_localize(**localize_args)
return ts
def generate_range(start=None, end=None, periods=None, offset=BDay()):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments.
Parameters
----------
start : datetime, (default None)
end : datetime, (default None)
periods : int, (default None)
offset : DateOffset, (default BDay())
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
Returns
-------
dates : generator object
"""
offset = to_offset(offset)
start = Timestamp(start)
start = start if start is not NaT else None
end = Timestamp(end)
end = end if end is not NaT else None
if start and not offset.is_on_offset(start):
start = offset.rollforward(start)
elif end and not offset.is_on_offset(end):
end = offset.rollback(end)
if periods is None and end < start and offset.n >= 0:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
if cur == end:
# GH#24252 avoid overflows by not performing the addition
# in offset.apply unless we have to
break
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError(f"Offset {offset} did not increment date")
cur = next_date
else:
while cur >= end:
yield cur
if cur == end:
# GH#24252 avoid overflows by not performing the addition
# in offset.apply unless we have to
break
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError(f"Offset {offset} did not decrement date")
cur = next_date
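# Illustrative sketch (not part of pandas): consuming the generator above with
# the default business-day offset; output formatting is approximate.
#
#   >>> list(generate_range(start="2020-01-01", periods=3))
#   [Timestamp('2020-01-01 00:00:00'), Timestamp('2020-01-02 00:00:00'),
#    Timestamp('2020-01-03 00:00:00')]
#
# 2020-01-01 is a Wednesday, so three consecutive business days are emitted.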
| iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/arrays/datetimes.py | Python | gpl-2.0 | 79,761 |
# -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import json
import mimetypes
import os
import struct
import warnings
from IPython.core.formatters import _safe_get_formatter_method
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
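# Illustrative sketch (not part of IPython): unlike dict.update, _merge keeps
# sibling keys inside nested metadata dicts.
#
#   >>> d = {'image/png': {'width': 100}}
#   >>> _merge(d, {'image/png': {'height': 50}})
#   {'image/png': {'width': 100, 'height': 50}}
#
# A plain dict.update would have replaced the whole 'image/png' sub-dict.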
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
        subclass used, so the Png subclass should be used for 'image/png'
        data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
        css : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
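# Illustrative sketch (not part of IPython): the IHDR chunk stores width and
# height as the eight bytes after its tag, big-endian, so a minimal fake header
# is enough to exercise _pngxy.
#
#   >>> header = _PNG + struct.pack('>I', 13) + b'IHDR' + struct.pack('>ii', 640, 480)
#   >>> _pngxy(header)
#   (640, 480)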
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=u'png',
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL
            is given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width to which to constrain the image in html
height : int
Height to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined: bool
Set unconfined=True to disable max-width confinement of the image.
metadata: dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if ext is not None:
format = ext.lower()
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
elif isinstance(data, bytes) and format == 'png':
# infer image type from image data header,
# only if format might not have been specified.
if data[:2] == _JPEG:
format = 'jpeg'
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=None, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
        data : unicode, str or bytes
            The raw video data or a URL or filename to load the data from.
            Raw data always results in an embedded video.
        url : unicode
            A URL to download the data from. If you specify `url=`,
            the video data will not be embedded unless you also specify `embed=True`.
        filename : unicode
            Path to a local file to load the data from.
            Videos from a file are always embedded.
        embed : bool
            Should the video data be embedded using a data URI (True) or be
            loaded from a URL using a <video> tag (False). Set this to True if
            you want the video to be viewable later with no internet connection
            in the notebook.
            Default is `True` when the video is loaded from a local file,
            `False` when it is loaded from a URL.
            Note that QtConsole is not able to display videos if `embed` is set to `False`.
        mimetype : unicode
            Specify the mimetype in case you load in an encoded video.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=False)
"""
        if url is None and isinstance(data, string_types) and data.startswith('http'):
url = data
data = None
embed = False
        elif data is not None and os.path.exists(data):
filename = data
data = None
self.mimetype = mimetype
self.embed = embed if embed is not None else (filename is not None)
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos uses base64 encoded videos.
if self.filename is not None:
mimetypes.init()
mimetype, encoding = mimetypes.guess_type(self.filename)
video = open(self.filename, 'rb').read()
video_encoded = video.encode('base64')
else:
video_encoded = self.data
mimetype = self.mimetype
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, video_encoded)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
from IPython.utils import io
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
from IPython.kernel.zmq.pylab.config import InlineBackend
# build kwargs, starting with InlineBackend config
kw = {}
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from IPython.kernel.zmq.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/IPython/core/display.py | Python | mit | 33,202 |
#!/usr/bin/env python3
def count_consonants(string):
""" Function which returns the count of
all consonants in the string \"string\" """
consonants = "bcdfghjklmnpqrstvwxz"
counter = 0
if string:
for ch in string.lower():
if ch in consonants:
counter += 1
return counter
if __name__ == "__main__":
strng = "MOnty PYthon!"
print ("%s contains %d consonants" % (strng, count_consonants(strng)))
| sevgo/Programming101 | week1/warmups/count_consonants.py | Python | bsd-3-clause | 468 |
#!/usr/bin/env python
from ucgrad import zeros, array, arange, write_matrix
import timeit
#from pylab import plot, show
def best(run, setup, n):
best = 1e20
for i in range(n):
t = timeit.timeit(run, setup, number=1)
if t < best:
best = t
return best
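# Illustrative sketch (not an original part of this script): best() keeps the
# minimum of n independent single-shot timeit runs, e.g.
#
#   best("sum(range(100))", "pass", 5)  # -> smallest of five timings, in seconds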
trans = [(1,2,0,3), (0,3,2,1)]
setup = """
from numpy import zeros
N = %d
a = zeros((N, N, N, N))
"""
n = array(range(4, 64, 4) + range(64, 128, 8) + range(128, 256, 16))
#n = arange(4,5)
M = zeros((len(n), 1+len(trans)))
M[:,0] = n
for i, tr in enumerate(trans):
t = array([best('a.transpose(%s).reshape((N*N,N*N))'%repr(tr), setup%N, 10)
for N in n])
print i, t
#plot(n, t)
#show()
M[:,i+1] = t
write_matrix(M, "trans.dat")
| frobnitzem/slack | examples/timings/ttr.py | Python | gpl-3.0 | 760 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.import string
import string
import random
import numpy as np
"""
function for calculating the convergence of an x, y data set
main api:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the asymptotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is True if such an x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the asymptotic value
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "[email protected]"
__date__ = "June 2014"
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class SplineInputError(Exception):
def __init__(self, msg):
self.msg = msg
def get_derivatives(xs, ys, fd=False):
"""
return the derivatives of y(x) at the points x
if scipy is available a spline is generated to calculate the derivatives
if scipy is not available the left and right slopes are calculated, if both exist the average is returned
putting fd to zero always returns the finite difference slopes
"""
try:
if fd:
raise SplineInputError('no spline wanted')
if len(xs) < 4:
er = SplineInputError('too few data points')
raise er
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(xs, ys)
d = spline.derivative(1)(xs)
except (ImportError, SplineInputError):
d = []
m, left, right = 0, 0, 0
for n in range(0, len(xs), 1):
try:
left = (ys[n] - ys[n-1]) / (xs[n] - xs[n-1])
m += 1
except IndexError:
pass
try:
right = (ys[n+1] - ys[n]) / (xs[n+1] - xs[n])
m += 1
except IndexError:
pass
            d.append((left + right) / m)
return d
"""
functions used in the fitting procedure, with initial guesses
"""
def print_and_raise_error(xs, ys, name):
print('Index error in', name)
print('ys: ', ys)
print('xs: ', xs)
raise RuntimeError
def reciprocal(x, a, b, n):
"""
reciprocal function to the power n to fit convergence data
"""
if n < 1:
n = 1
elif n > 5:
n = 5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** n)
y = np.array(y_l)
else:
y = a + b / x ** n
return y
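# Illustrative sketch (not part of pymatgen): the reciprocal model evaluated on
# a small grid, with the exponent n clipped into [1, 5] as above; output
# formatting is approximate.
#
#   >>> reciprocal([1, 2, 4], 1.0, 1.0, 1)
#   array([2.0, 1.5, 1.25])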
def p0_reciprocal(xs, ys):
"""
predictor for first guess for reciprocal
"""
a0 = ys[len(ys) - 1]
b0 = ys[0]*xs[0] - a0*xs[0]
return [a0, b0, 1]
def exponential(x, a, b, n):
"""
exponential function base n to fit convergence data
"""
if n < 1.000001:
n = 1.000001
elif n > 1.2:
n = 1.2
if b < -10:
b = -10
elif b > 10:
b = 10
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b * n ** -x_v)
y = np.array(y_l)
else:
y = a + b * n ** -x
return y
def p0_exponential(xs, ys):
n0 = 1.005
b0 = (n0 ** -xs[-1] - n0 ** -xs[1]) / (ys[-1] - ys[1])
a0 = ys[1] - b0 * n0 ** -xs[1]
#a0 = ys[-1]
#b0 = (ys[0] - a0) / n0 ** xs[0]
return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / (x_v - c))
y = np.array(y_l)
else:
y = a + b / (x - c)
return y
def p0_single_reciprocal(xs, ys):
c = 1
b = (1/(xs[-1] - c)-1/(xs[1] - c)) / (ys[-1] - ys[1])
a = ys[1] - b / (xs[1] - c)
return [a, b, c]
def simple_reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v)
y = np.array(y_l)
else:
y = a + b / x
return y
def p0_simple_reciprocal(xs, ys):
#b = (ys[-1] - ys[1]) / (1/xs[-1] - 1/xs[1])
#a = ys[1] - b / xs[1]
b = (ys[-1] - ys[-2]) / (1/(xs[-1]) - 1/(xs[-2]))
a = ys[-2] - b / (xs[-2])
return [a, b]
def simple_2reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 2
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_2reciprocal(xs, ys):
c = 2
b = (ys[-1] - ys[1]) / (1/xs[-1]**c - 1/xs[1]**c)
a = ys[1] - b / xs[1]**c
return [a, b]
def simple_4reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 4
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_4reciprocal(xs, ys):
c = 4
b = (ys[-1] - ys[1]) / (1/xs[-1]**c - 1/xs[1]**c)
a = ys[1] - b / xs[1]**c
return [a, b]
def simple_5reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 0.5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_5reciprocal(xs, ys):
c = 0.5
b = (ys[-1] - ys[1]) / (1/xs[-1]**c - 1/xs[1]**c)
a = ys[1] - b / xs[1]**c
return [a, b]
def extrapolate_simple_reciprocal(xs, ys):
b = (ys[-2] - ys[-1]) / (1/(xs[-2]) - 1/(xs[-1]))
a = ys[-1] - b / (xs[-1])
return [a, b]
def extrapolate_reciprocal(xs, ys, n, noise):
"""
return the parameters such that a + b / x^n hits the last two data points
"""
if len(xs) > 4 and noise:
y1 = (ys[-3] + ys[-4]) / 2
y2 = (ys[-1] + ys[-2]) / 2
x1 = (xs[-3] + xs[-4]) / 2
x2 = (xs[-1] + xs[-2]) / 2
try:
b = (y1 - y2) / (1/x1**n - 1/x2**n)
a = y2 - b / x2**n
except IndexError:
print_and_raise_error(xs, ys, 'extrapolate_reciprocal')
else:
try:
b = (ys[-2] - ys[-1]) / (1/(xs[-2])**n - 1/(xs[-1])**n)
a = ys[-1] - b / (xs[-1])**n
except IndexError:
print_and_raise_error(xs, ys, 'extrapolate_reciprocal')
return [a, b, n]
def measure(function, xs, ys, popt, weights):
"""
measure the quality of a fit
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError('y does not exist for x = ', x, ' this should not happen')
return m
def get_weights(xs, ys, mode=2):
ds = get_derivatives(xs, ys, fd=True)
if mode == 1:
mind = np.inf
for d in ds:
mind = min(abs(d), mind)
weights = []
for d in ds:
weights.append(abs((mind / d)))
if mode == 2:
maxxs = max(xs)**2
weights = []
for x in xs:
weights.append(x**2 / maxxs)
else:
weights = [1] * len(xs)
return weights
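# Illustrative sketch (not part of pymatgen): with the default mode=2 the
# weights grow quadratically toward the largest x, so the better-converged
# points dominate the fit.
#
#   >>> get_weights([1, 2, 4], [3.0, 2.5, 2.25])
#   [0.0625, 0.25, 1.0]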
def multi_curve_fit(xs, ys, verbose):
"""
fit multiple functions to the x, y data, return the best fit
"""
#functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
functions = {
exponential: p0_exponential,
reciprocal: p0_reciprocal,
#single_reciprocal: p0_single_reciprocal,
simple_reciprocal: p0_simple_reciprocal,
simple_2reciprocal: p0_simple_2reciprocal,
simple_4reciprocal: p0_simple_4reciprocal,
simple_5reciprocal: p0_simple_5reciprocal
}
from scipy.optimize import curve_fit
fit_results = {}
best = ['', np.inf]
for function in functions:
try:
weights = get_weights(xs, ys)
popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
pcov = []
m = measure(function, xs, ys, popt, weights)
fit_results.update({function: {'measure': m, 'popt': popt, 'pcov': pcov}})
for f in fit_results:
if fit_results[f]['measure'] <= best[1]:
best = f, fit_results[f]['measure']
if verbose:
print(str(function), m)
except RuntimeError:
print('no fit found for ', function)
return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def multi_reciprocal_extra(xs, ys, noise=False):
"""
    Calculates, for a series of powers ns, the parameters for which the last two points lie on the curve.
    With these parameters, measure how well the other data points fit.
    Return the best fit.
"""
ns = np.linspace(0.5, 6.0, num=56)
best = ['', np.inf]
fit_results = {}
weights = get_weights(xs, ys)
for n in ns:
popt = extrapolate_reciprocal(xs, ys, n, noise)
m = measure(reciprocal, xs, ys, popt, weights)
pcov = []
fit_results.update({n: {'measure': m, 'popt': popt, 'pcov': pcov}})
for n in fit_results:
if fit_results[n]['measure'] <= best[1]:
best = reciprocal, fit_results[n]['measure'], n
return fit_results[best[2]]['popt'], fit_results[best[2]]['pcov'], best
def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=''):
"""
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
"""
idp = id_generator()
f = open('convdat.'+str(idp), mode='w')
for n in range(0, len(ys), 1):
f.write(str(xs[n]) + ' ' + str(ys[n]) + '\n')
f.close()
tol = abs(tol)
line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
line += '%s lt 3, %s lt 4, %s lt 4, ' % (popt[0], popt[0] - tol, popt[0] + tol)
if function is exponential:
line += "%s + %s * %s ** -x" % (popt[0], popt[1], min(max(1.00001, popt[2]), 1.2))
elif function is reciprocal:
line += "%s + %s / x**%s" % (popt[0], popt[1], min(max(0.5, popt[2]), 6))
elif function is single_reciprocal:
line += "%s + %s / (x - %s)" % (popt[0], popt[1], popt[2])
elif function is simple_reciprocal:
line += "%s + %s / x" % (popt[0], popt[1])
elif function is simple_2reciprocal:
line += "%s + %s / x**2" % (popt[0], popt[1])
elif function is simple_4reciprocal:
line += "%s + %s / x**4" % (popt[0], popt[1])
elif function is simple_5reciprocal:
line += "%s + %s / x**0.5" % (popt[0], popt[1])
else:
print(function, ' no plot ')
with open('plot-fits', mode='a') as f:
f.write('set title "' + name + ' - ' + extra + '"\n')
f.write("set output '" + name + '-' + idp + ".gif'" + '\n')
f.write("set yrange [" + str(popt[0] - 5 * tol) + ':' + str(popt[0] + 5 * tol)+']\n')
f.write(line + '\n')
f.write('pause -1 \n')
def determine_convergence(xs, ys, name, tol=0.0001, extra='', verbose=False, mode='extra', plots=True):
"""
    Test at which x_value dy(x)/dx < tol for all x >= x_value; conv is True if such an x_value exists.
"""
if len(xs) != len(ys):
raise RuntimeError('the range of x and y are not equal')
conv = False
x_value = float('inf')
y_value = None
n_value = None
popt = [None, None, None]
if len(xs) > 2:
ds = get_derivatives(xs[0:len(ys)], ys)
try:
if None not in ys:
if mode == 'fit':
popt, pcov, func = multi_curve_fit(xs, ys, verbose)
elif mode == 'extra':
res = multi_reciprocal_extra(xs, ys)
if res is not None:
popt, pcov, func = multi_reciprocal_extra(xs, ys)
else:
print(xs, ys)
popt, pcov = None, None
elif mode == 'extra_noise':
popt, pcov, func = multi_reciprocal_extra(xs, ys, noise=True)
else:
raise NotImplementedError('unknown mode for test conv')
if func[1] > abs(tol):
print('warning function ', func[0], ' as the best fit but not a good fit: ', func[1])
# todo print this to file via a method in helper, as dict
if plots:
with open(name+'.fitdat', mode='a') as f:
f.write('{')
f.write('"popt": ' + str(popt) + ', ')
f.write('"pcov": ' + str(pcov) + ', ')
f.write('"data": [')
for n in range(0, len(ys), 1):
f.write('[' + str(xs[n]) + ' ' + str(ys[n]) + ']')
f.write(']}\n')
print_plot_line(func[0], popt, xs, ys, name, tol=tol, extra=extra)
except ImportError:
popt, pcov = None, None
for n in range(0, len(ds), 1):
if verbose:
print(n, ys[n])
print(ys)
if tol < 0:
if popt[0] is not None:
test = abs(popt[0] - ys[n])
else:
test = float('inf')
else:
test = abs(ds[n])
if verbose:
print(test)
if test < abs(tol):
if verbose:
print('converged')
conv = True
if xs[n] < x_value:
x_value = xs[n]
y_value = ys[n]
n_value = n
else:
if verbose:
print('not converged')
conv = False
x_value = float('inf')
if n_value is None:
return [conv, x_value, y_value, n_value, popt[0], None]
else:
return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
else:
return [conv, x_value, y_value, n_value, popt[0], None]
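# Illustrative sketch (not part of pymatgen): driving the routine above with a
# synthetic series converging like 1/x**2; names and values are made up.
#
#   xs = [1, 2, 3, 4, 5, 6]
#   ys = [2.0 + 1.0 / x ** 2 for x in xs]
#   conv, x_val, y_val, n_val, extrapolated, slope = determine_convergence(
#       xs, ys, name='demo', tol=-0.05, plots=False)
#
# With a negative tol the test is |extrapolated - y| < 0.05, so conv should come
# back True around x = 5, where y is within 0.05 of the asymptote (about 2.0).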
| dongsenfo/pymatgen | pymatgen/util/convergence.py | Python | mit | 14,788 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20150606_0342'),
]
operations = [
migrations.AlterField(
model_name='element',
name='answer',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='element',
name='choices',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='element',
name='concept',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='element',
name='eid',
field=models.CharField(max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='element',
name='numeric',
field=models.CharField(max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='element',
name='question',
field=models.TextField(null=True, blank=True),
),
]
| SanaMobile/sana.protocol_builder | src-django/api/migrations/0004_auto_20150623_1841.py | Python | bsd-3-clause | 1,251 |
"""Tests for the Device Registry."""
import asyncio
from unittest.mock import patch
import asynctest
import pytest
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from tests.common import flush_store, mock_device_registry
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(device_registry.EVENT_DEVICE_REGISTRY_UPDATED, async_capture)
return events
async def test_get_or_create_returns_same_entry(hass, registry, update_events):
"""Make sure we do not duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
sw_version="sw-version",
name="name",
manufacturer="manufacturer",
model="model",
)
entry2 = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "11:22:33:66:77:88")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry3 = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
assert len(registry.devices) == 1
assert entry.id == entry2.id
assert entry.id == entry3.id
assert entry.identifiers == {("bridgeid", "0123")}
assert entry3.manufacturer == "manufacturer"
assert entry3.model == "model"
assert entry3.name == "name"
assert entry3.sw_version == "sw-version"
await hass.async_block_till_done()
# Only 2 update events. The third entry did not generate any changes.
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["device_id"] == entry.id
assert update_events[1]["action"] == "update"
assert update_events[1]["device_id"] == entry.id
async def test_requirement_for_identifier_or_connection(registry):
"""Make sure we do require some descriptor of device."""
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers=set(),
manufacturer="manufacturer",
model="model",
)
entry2 = registry.async_get_or_create(
config_entry_id="1234",
connections=set(),
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry3 = registry.async_get_or_create(
config_entry_id="1234",
connections=set(),
identifiers=set(),
manufacturer="manufacturer",
model="model",
)
assert len(registry.devices) == 2
assert entry
assert entry2
assert entry3 is None
async def test_multiple_config_entries(registry):
"""Make sure we do not get duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry2 = registry.async_get_or_create(
config_entry_id="456",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry3 = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
assert len(registry.devices) == 1
assert entry.id == entry2.id
assert entry.id == entry3.id
assert entry2.config_entries == {"123", "456"}
async def test_loading_from_storage(hass, hass_storage):
"""Test loading stored devices on start."""
hass_storage[device_registry.STORAGE_KEY] = {
"version": device_registry.STORAGE_VERSION,
"data": {
"devices": [
{
"config_entries": ["1234"],
"connections": [["Zigbee", "01.23.45.67.89"]],
"id": "abcdefghijklm",
"identifiers": [["serial", "12:34:56:AB:CD:EF"]],
"manufacturer": "manufacturer",
"model": "model",
"name": "name",
"sw_version": "version",
"area_id": "12345A",
"name_by_user": "Test Friendly Name",
}
]
},
}
registry = await device_registry.async_get_registry(hass)
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={("Zigbee", "01.23.45.67.89")},
identifiers={("serial", "12:34:56:AB:CD:EF")},
manufacturer="manufacturer",
model="model",
)
assert entry.id == "abcdefghijklm"
assert entry.area_id == "12345A"
assert entry.name_by_user == "Test Friendly Name"
assert isinstance(entry.config_entries, set)
async def test_removing_config_entries(hass, registry, update_events):
"""Make sure we do not get duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry2 = registry.async_get_or_create(
config_entry_id="456",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry3 = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
identifiers={("bridgeid", "4567")},
manufacturer="manufacturer",
model="model",
)
assert len(registry.devices) == 2
assert entry.id == entry2.id
assert entry.id != entry3.id
assert entry2.config_entries == {"123", "456"}
registry.async_clear_config_entry("123")
entry = registry.async_get_device({("bridgeid", "0123")}, set())
entry3_removed = registry.async_get_device({("bridgeid", "4567")}, set())
assert entry.config_entries == {"456"}
assert entry3_removed is None
await hass.async_block_till_done()
assert len(update_events) == 5
assert update_events[0]["action"] == "create"
assert update_events[0]["device_id"] == entry.id
assert update_events[1]["action"] == "update"
assert update_events[1]["device_id"] == entry2.id
assert update_events[2]["action"] == "create"
assert update_events[2]["device_id"] == entry3.id
assert update_events[3]["action"] == "update"
assert update_events[3]["device_id"] == entry.id
assert update_events[4]["action"] == "remove"
assert update_events[4]["device_id"] == entry3.id
async def test_removing_area_id(registry):
"""Make sure we can clear area id."""
entry = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry_w_area = registry.async_update_device(entry.id, area_id="12345A")
registry.async_clear_area_id("12345A")
entry_wo_area = registry.async_get_device({("bridgeid", "0123")}, set())
assert not entry_wo_area.area_id
assert entry_w_area != entry_wo_area
async def test_specifying_via_device_create(registry):
"""Test specifying a via_device and updating."""
via = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("hue", "0123")},
manufacturer="manufacturer",
model="via",
)
light = registry.async_get_or_create(
config_entry_id="456",
connections=set(),
identifiers={("hue", "456")},
manufacturer="manufacturer",
model="light",
via_device=("hue", "0123"),
)
assert light.via_device_id == via.id
async def test_specifying_via_device_update(registry):
"""Test specifying a via_device and updating."""
light = registry.async_get_or_create(
config_entry_id="456",
connections=set(),
identifiers={("hue", "456")},
manufacturer="manufacturer",
model="light",
via_device=("hue", "0123"),
)
assert light.via_device_id is None
via = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("hue", "0123")},
manufacturer="manufacturer",
model="via",
)
light = registry.async_get_or_create(
config_entry_id="456",
connections=set(),
identifiers={("hue", "456")},
manufacturer="manufacturer",
model="light",
via_device=("hue", "0123"),
)
assert light.via_device_id == via.id
async def test_loading_saving_data(hass, registry):
"""Test that we load/save data correctly."""
orig_via = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("hue", "0123")},
manufacturer="manufacturer",
model="via",
)
orig_light = registry.async_get_or_create(
config_entry_id="456",
connections=set(),
identifiers={("hue", "456")},
manufacturer="manufacturer",
model="light",
via_device=("hue", "0123"),
)
assert len(registry.devices) == 2
# Now load written data in new registry
registry2 = device_registry.DeviceRegistry(hass)
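    # Flush the debounced save so the data written above is persisted before
    # the second registry loads it.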
await flush_store(registry._store)
await registry2.async_load()
# Ensure same order
assert list(registry.devices) == list(registry2.devices)
new_via = registry2.async_get_device({("hue", "0123")}, set())
new_light = registry2.async_get_device({("hue", "456")}, set())
assert orig_via == new_via
assert orig_light == new_light
async def test_no_unnecessary_changes(registry):
"""Make sure we do not consider devices changes."""
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
identifiers={("hue", "456"), ("bla", "123")},
)
with patch(
"homeassistant.helpers.device_registry.DeviceRegistry.async_schedule_save"
) as mock_save:
entry2 = registry.async_get_or_create(
config_entry_id="1234", identifiers={("hue", "456")}
)
assert entry.id == entry2.id
assert len(mock_save.mock_calls) == 0
async def test_format_mac(registry):
"""Make sure we normalize mac addresses."""
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for mac in ["123456ABCDEF", "123456abcdef", "12:34:56:ab:cd:ef", "1234.56ab.cdef"]:
test_entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, mac)},
)
assert test_entry.id == entry.id, mac
assert test_entry.connections == {
(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:ab:cd:ef")
}
# This should not raise
for invalid in [
"invalid_mac",
"123456ABCDEFG", # 1 extra char
"12:34:56:ab:cdef", # not enough :
"12:34:56:ab:cd:e:f", # too many :
"1234.56abcdef", # not enough .
"123.456.abc.def", # too many .
]:
invalid_mac_entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, invalid)},
)
assert list(invalid_mac_entry.connections)[0][1] == invalid
async def test_update(registry):
"""Verify that we can update some attributes of a device."""
entry = registry.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("hue", "456"), ("bla", "123")},
)
new_identifiers = {("hue", "654"), ("bla", "321")}
assert not entry.area_id
assert not entry.name_by_user
with patch.object(registry, "async_schedule_save") as mock_save:
updated_entry = registry.async_update_device(
entry.id,
area_id="12345A",
name_by_user="Test Friendly Name",
new_identifiers=new_identifiers,
via_device_id="98765B",
)
assert mock_save.call_count == 1
assert updated_entry != entry
assert updated_entry.area_id == "12345A"
assert updated_entry.name_by_user == "Test Friendly Name"
assert updated_entry.identifiers == new_identifiers
assert updated_entry.via_device_id == "98765B"
async def test_update_remove_config_entries(hass, registry, update_events):
"""Make sure we do not get duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry2 = registry.async_get_or_create(
config_entry_id="456",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
entry3 = registry.async_get_or_create(
config_entry_id="123",
connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
identifiers={("bridgeid", "4567")},
manufacturer="manufacturer",
model="model",
)
assert len(registry.devices) == 2
assert entry.id == entry2.id
assert entry.id != entry3.id
assert entry2.config_entries == {"123", "456"}
updated_entry = registry.async_update_device(
entry2.id, remove_config_entry_id="123"
)
removed_entry = registry.async_update_device(
entry3.id, remove_config_entry_id="123"
)
assert updated_entry.config_entries == {"456"}
assert removed_entry is None
removed_entry = registry.async_get_device({("bridgeid", "4567")}, set())
assert removed_entry is None
await hass.async_block_till_done()
assert len(update_events) == 5
assert update_events[0]["action"] == "create"
assert update_events[0]["device_id"] == entry.id
assert update_events[1]["action"] == "update"
assert update_events[1]["device_id"] == entry2.id
assert update_events[2]["action"] == "create"
assert update_events[2]["device_id"] == entry3.id
assert update_events[3]["action"] == "update"
assert update_events[3]["device_id"] == entry.id
assert update_events[4]["action"] == "remove"
assert update_events[4]["device_id"] == entry3.id
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with asynctest.patch(
"homeassistant.helpers.device_registry.DeviceRegistry.async_load"
) as mock_load:
results = await asyncio.gather(
device_registry.async_get_registry(hass),
device_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
| Teagan42/home-assistant | tests/helpers/test_device_registry.py | Python | apache-2.0 | 16,243 |
from django.core import mail
from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
get_review_reply_list_url)
class BaseResourceTestCase(BaseWebAPITestCase):
def _create_test_review(self, with_local_site=False):
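        """Create and return a published review that carries a file attachment comment."""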
review_request = self.create_review_request(
submitter=self.user,
with_local_site=with_local_site)
file_attachment = self.create_file_attachment(review_request)
review_request.publish(review_request.submitter)
review = self.create_review(review_request, publish=True)
self.create_file_attachment_comment(review, file_attachment)
return review
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
BaseResourceTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewReplyResource list APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review),
review_reply_list_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if reply.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
if populate_items:
items = [self.create_reply(review, publish=True)]
else:
items = []
return (get_review_reply_list_url(review, local_site_name),
review_reply_list_mimetype,
items)
def test_get_with_counts_only(self):
"""Testing the
GET review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
"""
review = self._create_test_review()
self.create_reply(review, user=self.user, publish=True)
rsp = self.api_get(
'%s?counts-only=1' % get_review_reply_list_url(review),
expected_mimetype=review_reply_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['count'], 1)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, publish=True)
return (get_review_reply_list_url(review, local_site_name),
review_reply_item_mimetype,
{},
[review])
def check_post_result(self, user, rsp, review):
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertFalse(reply.body_top_rich_text)
self.compare_item(rsp['reply'], reply)
def test_post_with_body_top(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_top
"""
body_top = 'My Body Top'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_top': body_top},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_top, body_top)
def test_post_with_body_bottom(self):
"""Testing the POST review-requests/<id>/reviews/<id>/replies/ API
with body_bottom
"""
body_bottom = 'My Body Bottom'
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp = self.api_post(
get_review_reply_list_url(review),
{'body_bottom': body_bottom},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.body_bottom, body_bottom)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
BaseResourceTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewReplyResource item APIs."""
fixtures = ['test_users']
sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
resource = resources.review_reply
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
return (get_review_reply_item_url(review, reply.pk),
review_reply_item_mimetype)
def compare_item(self, item_rsp, reply):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], reply.body_top)
self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)
if reply.body_top_rich_text:
self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
if reply.body_bottom_rich_text:
self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
else:
self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
[reply, review])
def check_delete_result(self, user, reply, review):
self.assertNotIn(reply, review.replies.all())
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
reply)
def test_get_not_modified(self):
"""Testing the GET review-requests/<id>/reviews/<id>/
with Not Modified response
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
self._testHttpCaching(
get_review_reply_item_url(reply.base_reply_to, reply.id),
check_etags=True)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user, publish=True)
reply = self.create_reply(review, user=user)
return (get_review_reply_item_url(review, reply.pk, local_site_name),
review_reply_item_mimetype,
{'body_top': 'New body top'},
reply,
[])
def check_put_result(self, user, item_rsp, reply, *args):
self.assertEqual(item_rsp['id'], reply.pk)
self.assertEqual(item_rsp['body_top'], 'New body top')
self.assertEqual(item_rsp['body_top_text_type'], 'plain')
reply = Review.objects.get(pk=reply.pk)
self.compare_item(item_rsp, reply)
def test_put_with_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
},
expected_mimetype=review_reply_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertEqual(reply.public, True)
self.assertEqual(len(mail.outbox), 1)
def test_put_with_publish_and_trivial(self):
"""Testing the PUT review-requests/<id>/draft/ API with trivial
changes
"""
review_request = self.create_review_request(submitter=self.user,
publish=True)
review = self.create_review(review_request, publish=True)
rsp, response = self.api_post_with_response(
get_review_reply_list_url(review),
expected_mimetype=review_reply_item_mimetype)
self.assertIn('Location', response)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
with self.siteconfig_settings({'mail_send_review_mail': True},
reload_settings=False):
rsp = self.api_put(
response['Location'],
{
'body_top': 'Test',
'public': True,
'trivial': True
},
expected_mimetype=review_reply_item_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('reply', rsp)
self.assertIn('id', rsp['reply'])
reply = Review.objects.get(pk=rsp['reply']['id'])
self.assertTrue(reply.public)
self.assertEqual(len(mail.outbox), 0)
| reviewboard/reviewboard | reviewboard/webapi/tests/test_review_reply.py | Python | mit | 12,104 |
# -*- coding: utf-8 -*-
"""
flask.testsuite.blueprints
~~~~~~~~~~~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
import warnings
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
# import moduleapp here because it uses deprecated features and we don't
# want to see the warnings
warnings.simplefilter('ignore', DeprecationWarning)
from moduleapp import app as moduleapp
warnings.simplefilter('default', DeprecationWarning)
class ModuleTestCase(FlaskTestCase):
@emits_module_deprecation_warning
def test_basic_module(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.route('/')
def admin_index():
return 'admin index'
@admin.route('/login')
def admin_login():
return 'admin login'
@admin.route('/logout')
def admin_logout():
return 'admin logout'
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(c.get('/admin/').data, b'admin index')
self.assert_equal(c.get('/admin/login').data, b'admin login')
self.assert_equal(c.get('/admin/logout').data, b'admin logout')
@emits_module_deprecation_warning
def test_default_endpoint_name(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'frontend')
def index():
return 'Awesome'
mod.add_url_rule('/', view_func=index)
app.register_module(mod)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Awesome')
with app.test_request_context():
self.assert_equal(flask.url_for('frontend.index'), '/')
@emits_module_deprecation_warning
def test_request_processing(self):
catched = []
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.before_request
def before_admin_request():
catched.append('before-admin')
@admin.after_request
def after_admin_request(response):
catched.append('after-admin')
return response
@admin.route('/')
def admin_index():
return 'the admin'
@app.before_request
def before_request():
catched.append('before-app')
@app.after_request
def after_request(response):
catched.append('after-app')
return response
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(catched, ['before-app', 'after-app'])
del catched[:]
self.assert_equal(c.get('/admin/').data, b'the admin')
self.assert_equal(catched, ['before-app', 'before-admin',
'after-admin', 'after-app'])
@emits_module_deprecation_warning
def test_context_processors(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@app.context_processor
def inject_all_regular():
return {'a': 1}
@admin.context_processor
def inject_admin():
return {'b': 2}
@admin.app_context_processor
def inject_all_module():
return {'c': 3}
@app.route('/')
def index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
@admin.route('/')
def admin_index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'13')
self.assert_equal(c.get('/admin/').data, b'123')
@emits_module_deprecation_warning
def test_late_binding(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.route('/')
def index():
return '42'
app.register_module(admin, url_prefix='/admin')
self.assert_equal(app.test_client().get('/admin/').data, b'42')
@emits_module_deprecation_warning
def test_error_handling(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.app_errorhandler(404)
def not_found(e):
return 'not found', 404
@admin.app_errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@admin.route('/')
def index():
flask.abort(404)
@admin.route('/error')
def error():
1 // 0
app.register_module(admin)
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_templates_and_static(self):
app = moduleapp
app.testing = True
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
def test_safe_access(self):
app = moduleapp
with app.test_request_context():
f = app.view_functions['admin.static']
try:
f('/etc/passwd')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
try:
f('../__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
# testcase for a security issue that may exist on windows systems
import os
import ntpath
old_path = os.path
os.path = ntpath
try:
try:
f('..\\__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
finally:
os.path = old_path
@emits_module_deprecation_warning
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
from flask import Module
app = flask.Flask(__name__)
app.testing = True
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
module = Module(__name__, __name__)
@module.endpoint('bar')
def bar():
return 'bar'
@module.endpoint('index')
def index():
return 'index'
app.register_module(module)
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
class BlueprintTestCase(FlaskTestCase):
def test_blueprint_specific_error_handling(self):
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
self.assert_equal(c.get('/frontend-no').data, b'frontend says no')
self.assert_equal(c.get('/backend-no').data, b'backend says no')
self.assert_equal(c.get('/what-is-a-sideend').data, b'application itself says no')
def test_blueprint_url_definitions(self):
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
self.assert_equal(c.get('/1/foo').data, b'23/42')
self.assert_equal(c.get('/2/foo').data, b'19/42')
self.assert_equal(c.get('/1/bar').data, b'23')
self.assert_equal(c.get('/2/bar').data, b'19')
def test_blueprint_url_processors(self):
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/de/')
def test_templates_and_static(self):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, expected_max_age)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
def test_default_static_cache_timeout(self):
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 100)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(self):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
self.assert_equal(templates, ['admin/index.html',
'frontend/index.html'])
def test_dotted_names(self):
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
self.assert_equal(c.get('/fe').data.strip(), b'/be')
self.assert_equal(c.get('/fe2').data.strip(), b'/fe')
self.assert_equal(c.get('/be').data.strip(), b'/fe')
def test_dotted_names_from_app(self):
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'/test/')
def test_empty_url_defaults(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/').data, b'1')
self.assert_equal(c.get('/page/2').data, b'2')
def test_route_decorator_custom_endpoint(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
self.assertEqual(c.get('/').data, b'index')
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
self.assertEqual(c.get('/py/bar').data, b'bp.bar')
self.assertEqual(c.get('/py/bar/123').data, b'bp.123')
self.assertEqual(c.get('/py/bar/foo').data, b'bp.bar_foo')
def test_route_decorator_custom_endpoint_with_dots(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
self.assertRaises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
self.assertRaises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
        # The rules didn't actually make it through
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_test(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_add_template_test(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BlueprintTestCase))
suite.addTest(unittest.makeSuite(ModuleTestCase))
return suite
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/flask/testsuite/blueprints.py | Python | apache-2.0 | 28,089 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MonitorManagementClientConfiguration(Configuration):
"""Configuration for MonitorManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(MonitorManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2017-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-monitor/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_04_01/aio/_configuration.py | Python | mit | 3,214 |
#!/usr/bin/python2
# if not specified, learning rate for RNN = 0.001, for others = 0.1
# `full` in dir name means using both training & validation set to train the model
# git branch `diff`: using the difference between adjacent frame pair as model input
# git branch `filter`: only using part of the categories
# frame_level DBOF model | iter 16958
f2="/DATACENTER/3/xiw/yt8m/frame_level_dbof_rgb_audio/predictions-16958.csv"
# frame_level LSTM model | iter 72020
f3="/DATACENTER/3/xiw/yt8m/frame_level_lstm_rgb_audio/predictions-72020.csv"
# video_level MOE model | iter 23010
f4="/DATACENTER/3/xiw/yt8m/video_level_moe_rgb_audio/predictions.csv"
# frame_level GRU model | iter 98465
f5="/DATACENTER/3/xiw/yt8m/frame_level_gru_rgb_audio/predictions_98465.csv"
# frame_level LayerNorm LSTM model (dropout = 0.75)| iter 158413
f6="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio/predictions.csv"
# frame_level GRU model (using the difference between adjacent frame pair as model input) | iter 107961
f7="/DATACENTER/3/xiw/yt8m/frame_level_grud_rgb_audio_stable/predictions.csv"
# frame_level GRU model (using the difference between adjacent frame pair as model input) (learning rate = 0.0005) | iter 124777
f8="/DATACENTER/3/xiw/yt8m/frame_level_grud_rgb_audio_lrd2/predictions.csv"
# frame_level GRU model (using the difference between adjacent frame pair as model input) | iter 107961
f9="/DATACENTER/3/xiw/yt8m/frame_level_grud_rgb_audio_stable_full/predictions.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50)| iter 146268
f10="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50/predictions_146268.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50) | iter 144006
f11="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50_full/prediction_144006.csv"
# video_level MOE model (only using 3571 categories) | iter 27400
f12="/DATACENTER/3/xiw/yt8m/video_level_moe_rgb_audio_full_filter/prediction.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50) | iter 203360
f13="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50_full/predictions-203360.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50) | iter 240360
f14="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50_full/predictions-240360.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50) | iter 222360
f15="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50_full/predictions-222360.csv"
# frame_level GRU model | iter 60860
f16="/DATACENTER/3/xiw/yt8m/frame_level_gru_rgb_audio/predictions-60860.csv"
# frame_level GRU model | iter 80297
f17="/DATACENTER/3/xiw/yt8m/frame_level_gru_rgb_audio/predictions-80297.csv"
# frame_level LayerNorm LSTM model (dropout = 0.75)| iter 150168
f18="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio/predictions-lnblstm-150168.csv"
# frame_level GRU model (using the difference between adjacent frame pair as model input) | iter 80177
f19="/DATACENTER/3/xiw/yt8m/frame_level_grud_rgb_audio_stable_full/predictions-80177.csv"
# frame_level GRU model (using the difference between adjacent frame pair as model input) | iter 60954
f20="/DATACENTER/3/xiw/yt8m/frame_level_grud_rgb_audio_stable_full/predictions-60954.csv"
# frame_level LayerNorm LSTM model (dropout = 0.50) | iter 286374
f21="/DATACENTER/3/xiw/yt8m/frame_level_lnblstm_rgb_audio_d50_full/predictions-286k.csv"
# video_level MOE model (only using 2534 categories) | iter 27177
f22="/DATACENTER/3/xiw/yt8m/video_level_moe_rgb_audio_full_filter-2534_r05/predictions.csv"
f = [f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22]
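# Ensemble weight for each prediction file above (same order as f).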
r = [0.5, 0.1, 0.7, 1.6, 1.3, 0.3, 0.3, 0.6, 1.1, 1.0, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.6, 0.1, 0.1, 0.8, 0.8]
import numpy as np
#r = [x / sum(rraw) for x in rraw]
print 'VideoId,LabelConfidencePairs'
res = {}
ci = 0
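# Weighted late fusion: for every video, accumulate weight * confidence per label
# over all model prediction files.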
for fi in f:
with open(fi) as file:
ca = file.readlines()
cc = [x.strip().split(',') for x in ca[1:]]
for k in range(len(cc)):
if cc[k][0] in res:
d = res[cc[k][0]]
else:
d = {}
t=100
id = cc[k][1].split()[:t:2]
val = cc[k][1].split()[1:t:2]
for i in range(len(id)):
pred = float(val[i])
if id[i] in d:
d[id[i]] += pred * r[ci]
else:
d[id[i]] = pred * r[ci]
res[cc[k][0]] = d
ci += 1
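# Write the submission: top 30 fused labels per video, highest score first.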
for n in res:
p = n +','
for id, value in sorted(res[n].iteritems(), key=lambda (k,v): (-v,k))[:30]:
p += id + ' ' + "%f" % value + ' '
print p
| forwchen/yt8m | fusion/simple_fusion.py | Python | apache-2.0 | 4,444 |
import logging
import signal
import socket
import configparser
import importlib.machinery
import serial
import copy
import zmq
class Dispatcher(object):
""" Superclass for all Dispatchers.
This is the part of the simulator that handles the connections.
"""
def __init__(self, dispatcher_type, dispatcher_id):
        self.name = dispatcher_type
self.dispatcher_id = dispatcher_id
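        # Maps a poll key (zmq socket, or fileno() for plain sockets) to a
        # (socket, handler) pair used by the main loop.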
self.call_backs = {}
        self.go_on = True
        # Default poll timeout in ms; subclasses may adjust self.timeout.
        self.timeout = 60000
logger = logging.getLogger('{0}_simulator'
.format(dispatcher_type))
logger.setLevel(logging.INFO)
logfile = '/tmp/test.log'
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
self.logger = logger
self.context = zmq.Context(1)
    def control_c_handler(self, signum, frame):
        """Request a controlled shutdown so we can clean up."""
        self.go_on = False
def create_sockets(self, accept_socket):
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
command_socket.setsockopt(zmq.SUBSCRIBE, "")
# Add the sockets to the zmq poller.
self.poller = zmq.Poller()
self.poller.register(accept_socket, zmq.POLLIN)
self.poller.register(command_socket, zmq.POLLIN)
# Register the call backs.
self.call_backs[accept_socket.fileno()] = (accept_socket, self.accept)
self.call_backs[command_socket] = (command_socket,
self.process_player_command)
# Not part of the poller
# Message forwarding link to player
address = "tcp://*:{0}".format(self.message_forward_port)
self.logger.info("Publishing on " + address)
self.repeater_socket = self.context.socket(zmq.PUB)
self.repeater_socket.bind(address)
def accept(self, a_socket):
"""Accept a connection from the system.
"""
system_socket, address = a_socket.accept()
self.logger.info('Connection from ' + str(address))
# Register this socket too so we look for incoming data
self.poller.register(system_socket, zmq.POLLIN)
self.call_backs[system_socket.fileno()] = (
system_socket, self.process_message)
self.system_socket = system_socket
def process_player_command(self, a_socket):
""" Process a command from the scenario player.
"""
# receive the command
command = a_socket.recv_pyobj()
self.logger.info('received command from scenario player: {0}'
.format(type(command)))
self.system_socket.send(self.message.to_message(command))
def process_message(self, a_socket):
""" Receive and forward a message from the system """
        self.logger.info('Data from the system')
# We do not know beforehand how big the blob is.
        data = a_socket.recv(2048)
        if not data:
# Connection was closed, so unregister and close the socket.
self.poller.unregister(a_socket)
del self.call_backs[a_socket.fileno()]
a_socket.close()
self.system_socket = None
        else:
            a_message = self.message.from_message(data)
self.logger.info('Copying data to player')
self.repeater_socket.send_pyobj(a_message)
def run(self):
# Catch any Control-C
signal.signal(signal.SIGINT, self.control_c_handler)
self.create_sockets()
        while self.go_on:
            # Note that poller uses fileno() as the key for non-zmq sockets.
            # The timeout is in ms; subclasses may shorten self.timeout while
            # they are in the middle of receiving a message.
            socks = dict(self.poller.poll(self.timeout))
            for socket_key in self.call_backs.copy():
# Need copy here cause we might modify the call_backs
# while in the call back functions.
if socket_key in socks and socks[socket_key] == zmq.POLLIN:
if socket_key in self.call_backs:
cbp = self.call_backs[socket_key]
function = cbp[1]
function(cbp[0])
self.logger.info("Still alive")
            # Give the subclass a chance to do per-iteration work.
            self.run_iteration(socks)
self.logger.info("Stopping")
self.context.term()
#------------------------------------------------------------------------------
class TCPDispatcher(Dispatcher):
""" Dispatcher subclass for TCP connections"""
def __init__(self, dispatcher_type, dispatcher_id):
        Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
            self.listen_port = int(entries['ListenPort'])
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid tcp section found in config file')
def create_sockets(self):
""" Create the TCP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Open a tcp socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        accept_socket.bind((self.accept_address, self.listen_port))
# Only handle one connection at a time.
accept_socket.listen(1)
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def run_iteration(self, socks):
# TCP dispatcher has no extra steps to add to the default loop.
# We will just exit this method.
pass
#------------------------------------------------------------------------------
class SerialDispatcher(Dispatcher):
""" Dispatcher subclass for Serial connections"""
SERIAL_PARITY = {'none':serial.PARITY_NONE , 'even':serial.PARITY_EVEN ,
'odd':serial.PARITY_ODD , 'mark':serial.PARITY_MARK ,
'space':serial.PARITY_SPACE}
SERIAL_STOPBITS= {'one':serial.STOPBITS_ONE ,
'onePointFive': serial.STOPBITS_ONE_POINT_FIVE,
'two':serial.STOPBITS_TWO }
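    # Poll timeout in milliseconds; lowered to a few ms while a serial
    # message is being received so the end of the message can be detected.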
default_timeout = 60000
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
self.repeater_socket = None
self.poller = None
        self.call_backs = {}
self.serial_link = None
        self.timeout = self.default_timeout
self.receiving = False
self.blob = ""
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader(
                    'message', self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# Settings for the serial link to the system.
self.serial_device = entries['Device']
self.serial_baudrate = int(entries['BaudRate'])
self.serial_bytesize = int(entries['ByteSize'])
            self.serial_parity = self.SERIAL_PARITY.get(entries['Parity'])
            self.serial_stopbits = self.SERIAL_STOPBITS.get(entries['StopBits'])
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid serial section '
'found in config file')
def create_sockets(self):
""" Create the socket to the scenario player and set up the
serial link to the system
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Setup a serial link to listen to the system
self.logger.info("Opening serial device {0} ".format(serial_device))
self.serial_link = serial.Serial(serial_device, serial_baudrate,
serial_parity, serial_bytesize,
serial_stopbits)
# Open a socket to listen for commands from the scenario player
address = "tcp://*:{0}".format(self.command_listen_port)
self.logger.info("Command subscription at {0}".format(address))
command_socket = self.context.socket(zmq.SUB)
command_socket.bind(address)
        command_socket.setsockopt(zmq.SUBSCRIBE, b"")
# Add the sockets to the zmq poller.
self.poller = zmq.Poller()
        if self.serial_link:
            self.poller.register(self.serial_link, zmq.POLLIN)
# Register callback
self.call_backs[self.serial_link.fileno()] = (self.serial_link,
self.read_message)
self.poller.register(command_socket, zmq.POLLIN)
# Register the call backs.
self.call_backs[command_socket] = (command_socket,
self.process_player_command)
# Not part of the poller
# Message forwarding link to player
address = "tcp://*:{0}".format(self.message_forward_port)
self.logger.info("Publishing on " + address)
self.repeater_socket = self.context.socket(zmq.PUB)
self.repeater_socket.bind(address)
def read_message(self, link):
"""Read one or more bytes from the system
"""
# We do not know beforehand how big the blob is and data might come in
# parts, mostly one character at a time, sometimes a few more.
        blob = link.read()
self.blob += blob
# Set timeout to a low value. We should receive a new byte within
# this period otherwise we assume it is the end of the message. If we
# make this too high, there will be a delay in processing the message.
# The baud rate is 57600 so a single character takes
# 8/57600 == 0.000138 seconds == 0.138 milliseconds
# So 10ms should be enough.
self.timeout = 10 # in ms
self.receiving = True
def process_message(self):
"""Receive and forward a message from the system.
"""
self.logger.info('Received a full message from the system')
self.logger.info(",".join(map(lambda x: hex(ord(x)), self.blob)))
a_message = self.message.from_message( self.blob )
self.logger.info('Copying data to player')
self.repeater_socket.send_pyobj(a_message)
self.blob = ""
def process_player_command(self, a_socket):
""" Process a command from the scenario player.
"""
# receive the command
command = a_socket.recv_pyobj()
self.logger.info('received command from scenario player: {0}'
.format(type(command)))
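        # Serialize the command with the loaded Message class and write it
        # to the serial link.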
self.serial_link.write(self.message.to_message(command))
def run(self, socks):
if len(socks) == 0 and self.receiving :
# We were in the process of receiving data from OBIS.
# We did not receive any new bytes, so we assume it's
# the end of the message.
self.process_message()
self.receiving = False
# Set timeout back to a high value, so we not waste CPU
# cycles.
            self.timeout = self.default_timeout
self.blob = "" # Reset the message buffer
        elif len(socks) == 0 and self.timeout == self.default_timeout:
self.logger.info("Nothing happened for a long time.")
else:
pass
self.logger.info('Stopping')
self.context.term()
#------------------------------------------------------------------------------
class UDPDispatcher(Dispatcher):
""" Dispatcher subclass for UDP connections"""
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader('message',
                                                              self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid udp section found in config file')
def create_sockets(self):
""" Create the UDP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
# Open an UDP socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
address = "udp://{0}:{1}".format(accept_address, listen_port)
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
accept_socket.bind((self.accept_address, self.listen_port))
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def run(self, socks):
pass
#------------------------------------------------------------------------------
class HttpDispatcher(Dispatcher):
""" Dispatcher subclass for Http connections"""
def __init__(self, dispatcher_type, dispatcher_id):
Dispatcher.__init__(self, dispatcher_type, dispatcher_id)
config = configparser.ConfigParser()
config.read('simulator.conf')
dispatcher_section = ('dispatcher-{0}-{1}'
.format(dispatcher_type, dispatcher_id))
if (dispatcher_section) in config.sections():
entries = config[dispatcher_section]
# path to the message class
self.message_path = entries['MessagePath']
            if self.message_path is not None:
                loader = importlib.machinery.SourceFileLoader('message',
                                                              self.message_path)
                message_module = loader.load_module()
                self.message = message_module.Message()
# address and port to listen on for messages from the system
self.accept_address = entries['AcceptAddress']
self.listen_port = entries['ListenPort']
# port to listen on for commands from the player.
self.command_listen_port = entries['CommandListenPort']
# port to forward messages to the player.
self.message_forward_port = entries['MessageForwardPort']
else:
self.logger.critical('no valid http section found in config file')
def create_sockets(self):
""" Create the UDP sockets between the system and the
Scenario player
"""
self.logger.info('Creating sockets for {0} {1}'
.format(self.name, self.dispatcher_id))
        # Open a TCP socket to listen for new connections
# from the system.
self.logger.info("Listening on address {0}"
.format(str(self.accept_address)))
self.logger.info("Listening on port {0}".format(str(self.listen_port)))
address = "{0}:{1}".format(accept_address, listen_port)
accept_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
accept_socket.bind((self.accept_address, self.listen_port))
# Let the superclass finish the creation of the rest of the
# sockets, because it is the same.
Dispatcher.create_sockets(self, accept_socket)
    def process_message(self, a_socket):
""" Method to process a HTTP request from the system.
:param a_socket: the socket on which the message arrives.
:type a_socket: socket
"""
self.logger.info('HTTP Request')
        if self.http_request is None:
self.http_request = Message()
self.logger.info('Parsing request')
if self.http_request.from_message(a_socket):
self.logger.info('Waiting for more data')
else:
# We received the full request.
# Send a reply and close the connection.
a_socket.send( "HTTP/1.1 200 OK\n" )
self.logger.info('Complete request received.')
self.poller.unregister(a_socket)
del self.call_backs[ a_socket.fileno() ]
a_socket.close()
self.client_socket = None
self.logger.info('Forwarding request to player')
self.forward_socket.send_pyobj(self.http_request)
self.http_request = None
    def run(self, socks):
# Clean-up
if self.forward_socket is not None:
self.forward_socket.close()
if self.command_socket is not None:
self.command_socket.close()
if self.accept_socket is not None:
self.accept_socket.close()
if self.client_socket is not None:
self.client_socket.close()
| InTraffic/TSTK | TSTK/dispatcher.py | Python | gpl-3.0 | 20,467 |
__simple_example = '''
- task: process.tasks.slack.send_message
args:
message: 'hello!'
'''
__meta = {
'args': {
'message': { 'type': 'str', 'default': 'hello world'},
},
'examples': [
{ 'title': 'Basic usage', 'description': __simple_example },
]
}
def say_hello(message='hello world', *args, **kwargs):
    '''Prints `message` or "hello world" if no message is provided'''
print(message)
return {'result': message} | toast38coza/DJProcess | process/tasks/io/say_hello.py | Python | mit | 463 |
"""Library for handling batch HTTP requests for apitools."""
import collections
import email.generator as generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import email.parser as email_parser
import itertools
import StringIO
import time
import urllib
import urlparse
import uuid
from six.moves import http_client
from googlecloudapis.apitools.base.py import exceptions
from googlecloudapis.apitools.base.py import http_wrapper
__all__ = [
'BatchApiRequest',
]
class RequestResponseAndHandler(collections.namedtuple(
'RequestResponseAndHandler', ['request', 'response', 'handler'])):
"""Container for data related to completing an HTTP request.
This contains an HTTP request, its response, and a callback for handling
the response from the server.
Attributes:
request: An http_wrapper.Request object representing the HTTP request.
response: The http_wrapper.Response object returned from the server.
handler: A callback function accepting two arguments, response
and exception. Response is an http_wrapper.Response object, and
exception is an apiclient.errors.HttpError object if an error
occurred, or otherwise None.
"""
class BatchApiRequest(object):
class ApiCall(object):
"""Holds request and response information for each request.
ApiCalls are ultimately exposed to the client once the HTTP batch request
has been completed.
Attributes:
http_request: A client-supplied http_wrapper.Request to be
submitted to the server.
response: A http_wrapper.Response object given by the server as a
response to the user request, or None if an error occurred.
exception: An apiclient.errors.HttpError object if an error
occurred, or None.
"""
def __init__(self, request, retryable_codes, service, method_config):
"""Initialize an individual API request.
Args:
request: An http_wrapper.Request object.
retryable_codes: A list of integer HTTP codes that can be retried.
service: A service inheriting from base_api.BaseApiService.
method_config: Method config for the desired API request.
"""
self.__retryable_codes = list(
set(retryable_codes + [http_client.UNAUTHORIZED]))
self.__http_response = None
self.__service = service
self.__method_config = method_config
self.http_request = request
# TODO(user): Add some validation to these fields.
self.__response = None
self.__exception = None
@property
def is_error(self):
return self.exception is not None
@property
def response(self):
return self.__response
@property
def exception(self):
return self.__exception
@property
def authorization_failed(self):
return (self.__http_response and (
self.__http_response.status_code == http_client.UNAUTHORIZED))
@property
def terminal_state(self):
return (self.__http_response and (
self.__http_response.status_code not in self.__retryable_codes))
def HandleResponse(self, http_response, exception):
"""Handles an incoming http response to the request in http_request.
This is intended to be used as a callback function for
BatchHttpRequest.Add.
Args:
http_response: Deserialized http_wrapper.Response object.
exception: apiclient.errors.HttpError object if an error occurred.
"""
self.__http_response = http_response
self.__exception = exception
if self.terminal_state and not self.__exception:
self.__response = self.__service.ProcessHttpResponse(
self.__method_config, self.__http_response)
def __init__(self, batch_url=None, retryable_codes=None):
"""Initialize a batch API request object.
Args:
batch_url: Base URL for batch API calls.
retryable_codes: A list of integer HTTP codes that can be retried.
"""
self.api_requests = []
self.retryable_codes = retryable_codes or []
self.batch_url = batch_url or 'https://www.googleapis.com/batch'
def Add(self, service, method, request, global_params=None):
"""Add a request to the batch.
Args:
service: A class inheriting base_api.BaseApiService.
method: A string indicated desired method from the service. See
the example in the class docstring.
request: An input message appropriate for the specified service.method.
global_params: Optional additional parameters to pass into
method.PrepareHttpRequest.
Returns:
None
"""
# Retrieve the configs for the desired method and service.
method_config = service.GetMethodConfig(method)
upload_config = service.GetUploadConfig(method)
# Prepare the HTTP Request.
http_request = service.PrepareHttpRequest(
method_config, request, global_params=global_params,
upload_config=upload_config)
# Create the request and add it to our master list.
api_request = self.ApiCall(
http_request, self.retryable_codes, service, method_config)
self.api_requests.append(api_request)
def Execute(self, http, sleep_between_polls=5, max_retries=5):
"""Execute all of the requests in the batch.
Args:
http: httplib2.Http object for use in the request.
sleep_between_polls: Integer number of seconds to sleep between polls.
max_retries: Max retries. Any requests that have not succeeded by
this number of retries simply report the last response or
exception, whatever it happened to be.
Returns:
List of ApiCalls.
"""
requests = [request for request in self.api_requests if not
request.terminal_state]
for attempt in range(max_retries):
if attempt:
time.sleep(sleep_between_polls)
# Create a batch_http_request object and populate it with incomplete
# requests.
batch_http_request = BatchHttpRequest(batch_url=self.batch_url)
for request in requests:
batch_http_request.Add(request.http_request, request.HandleResponse)
batch_http_request.Execute(http)
# Collect retryable requests.
requests = [request for request in self.api_requests if not
request.terminal_state]
if (any(request.authorization_failed for request in requests)
and hasattr(http.request, 'credentials')):
http.request.credentials.refresh(http)
if not requests:
break
return self.api_requests
class BatchHttpRequest(object):
"""Batches multiple http_wrapper.Request objects into a single request."""
def __init__(self, batch_url, callback=None):
"""Constructor for a BatchHttpRequest.
Args:
batch_url: URL to send batch requests to.
callback: A callback to be called for each response, of the
form callback(response, exception). The first parameter is
the deserialized Response object. The second is an
apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no error occurred.
"""
# Endpoint to which these requests are sent.
self.__batch_url = batch_url
# Global callback to be called for each individual response in the batch.
self.__callback = callback
# List of requests, responses and handlers.
self.__request_response_handlers = {}
# The last auto generated id.
self.__last_auto_id = itertools.count()
# Unique ID on which to base the Content-ID headers.
self.__base_id = uuid.uuid4()
def _ConvertIdToHeader(self, request_id):
"""Convert an id to a Content-ID header value.
Args:
request_id: String identifier for a individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
return '<%s+%s>' % (self.__base_id, urllib.quote(request_id))
@staticmethod
def _ConvertHeaderToId(header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that
_ConvertIdToHeader() returns.
Args:
header: A string indicating the Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
    if not (header.startswith('<') and header.endswith('>')):
raise exceptions.BatchError('Invalid value for Content-ID: %s' % header)
if '+' not in header:
raise exceptions.BatchError('Invalid value for Content-ID: %s' % header)
_, request_id = header[1:-1].rsplit('+', 1)
return urllib.unquote(request_id)
def _SerializeRequest(self, request):
"""Convert a http_wrapper.Request object into a string.
Args:
request: A http_wrapper.Request to serialize.
Returns:
The request as a string in application/http format.
"""
# Construct status line
parsed = urlparse.urlsplit(request.url)
request_line = urlparse.urlunsplit(
(None, None, parsed.path, parsed.query, None))
status_line = request.http_method + ' ' + request_line + ' HTTP/1.1\n'
major, minor = request.headers.get(
'content-type', 'application/json').split('/')
msg = mime_nonmultipart.MIMENonMultipart(major, minor)
# MIMENonMultipart adds its own Content-Type header.
# Keep all of the other headers in `request.headers`.
for key, value in request.headers.items():
if key == 'content-type':
continue
msg[key] = value
msg['Host'] = parsed.netloc
msg.set_unixfrom(None)
if request.body is not None:
msg.set_payload(request.body)
# Serialize the mime message.
str_io = StringIO.StringIO()
# maxheaderlen=0 means don't line wrap headers.
gen = generator.Generator(str_io, maxheaderlen=0)
gen.flatten(msg, unixfrom=False)
body = str_io.getvalue()
# Strip off the \n\n that the MIME lib tacks onto the end of the payload.
if request.body is None:
body = body[:-2]
return status_line.encode('utf-8') + body
def _DeserializeResponse(self, payload):
"""Convert string into Response and content.
Args:
payload: Header and body string to be deserialized.
Returns:
A Response object
"""
# Strip off the status line.
status_line, payload = payload.split('\n', 1)
_, status, _ = status_line.split(' ', 2)
# Parse the rest of the response.
parser = email_parser.Parser()
msg = parser.parsestr(payload)
# Get the headers.
info = dict(msg)
info['status'] = status
# Create Response from the parsed headers.
content = msg.get_payload()
return http_wrapper.Response(info, content, self.__batch_url)
def _NewId(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
A new unique id string.
"""
return str(next(self.__last_auto_id))
def Add(self, request, callback=None):
"""Add a new request.
Args:
request: A http_wrapper.Request to add to the batch.
callback: A callback to be called for this response, of the
form callback(response, exception). The first parameter is the
deserialized response object. The second is an
apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors occurred.
Returns:
None
"""
self.__request_response_handlers[self._NewId()] = RequestResponseAndHandler(
request, None, callback)
def _Execute(self, http):
"""Serialize batch request, send to server, process response.
Args:
http: A httplib2.Http object to be used to make the request with.
Raises:
httplib2.HttpLib2Error if a transport error has occured.
apiclient.errors.BatchError if the response is the wrong format.
"""
message = mime_multipart.MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for key in self.__request_response_handlers:
msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._ConvertIdToHeader(key)
body = self._SerializeRequest(
self.__request_response_handlers[key].request)
msg.set_payload(body)
message.attach(msg)
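    # POST the combined multipart payload to the batch endpoint.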
request = http_wrapper.Request(self.__batch_url, 'POST')
request.body = message.as_string()
request.headers['content-type'] = (
'multipart/mixed; boundary="%s"') % message.get_boundary()
response = http_wrapper.MakeRequest(http, request)
if response.status_code >= 300:
raise exceptions.HttpError.FromResponse(response)
# Prepend with a content-type header so Parser can handle it.
header = 'content-type: %s\r\n\r\n' % response.info['content-type']
parser = email_parser.Parser()
mime_response = parser.parsestr(header + response.content)
if not mime_response.is_multipart():
raise exceptions.BatchError('Response not in multipart/mixed format.')
for part in mime_response.get_payload():
request_id = self._ConvertHeaderToId(part['Content-ID'])
response = self._DeserializeResponse(part.get_payload())
# Disable protected access because namedtuple._replace(...)
# is not actually meant to be protected.
self.__request_response_handlers[request_id] = (
self.__request_response_handlers[request_id]._replace( # pylint: disable=protected-access
response=response))
def Execute(self, http):
"""Execute all the requests as a single batched HTTP request.
Args:
http: A httplib2.Http object to be used with the request.
Returns:
None
Raises:
BatchError if the response is the wrong format.
"""
self._Execute(http)
for key in self.__request_response_handlers:
response = self.__request_response_handlers[key].response
callback = self.__request_response_handlers[key].handler
exception = None
if response.status_code >= 300:
exception = exceptions.HttpError.FromResponse(response)
if callback is not None:
callback(response, exception)
if self.__callback is not None:
self.__callback(response, exception)
| ychen820/microblog | y/google-cloud-sdk/lib/googlecloudapis/apitools/base/py/batch.py | Python | bsd-3-clause | 14,581 |
###
# Copyright (c) 2013, KG-Bot
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class ESimTestCase(PluginTestCase):
plugins = ('ESim',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| kg-bot/SupyBot | plugins/ESim/test.py | Python | gpl-3.0 | 1,722 |
'''
Created on = '7/9/14'
Author = 'mmunn'
Unit test : EUCA-8906 De-register/Re-register of Cluster with Different Name Doesn't Work
setUp : Install Credentials,
test : De-register a cluster Re-register Cluster with Different Name make sure it goes to ENABLED
tearDown : Cleanup artifacts
cloud.conf:( place in same directory as this test)
IP ADDRESS CENTOS 6.5 64 BZR [CC00 CLC SC00 WS]
IP ADDRESS CENTOS 6.5 64 BZR [NC00]
'''
import unittest
import shutil
from eucaops import Eucaops
class EucaTest(unittest.TestCase):
def setUp(self):
self.conf = "cloud.conf"
self.tester = Eucaops(config_file=self.conf, password="foobar")
self.doAuth()
self.STARTC = '\033[1m\033[1m\033[42m'
self.ENDC = '\033[0m'
self.cc= self.tester.service_manager.get_all_cluster_controllers()[3]
self.orig_name = self.cc.name
def tearDown(self):
# deregister cluster
self.runSysCmd("/opt/eucalyptus/usr/sbin/euca_conf --deregister-cluster --partition "
+ self.cc.partition + " --host " + self.cc.hostname + " --component " + self.cc.name + '_TEST' )
# register cluster
self.runSysCmd("/opt/eucalyptus/usr/sbin/euca_conf --register-cluster --partition "
+ self.cc.partition + " --host " + self.cc.hostname + " --component " + self.orig_name)
self.tester.cleanup_artifacts()
self.tester.delete_keypair(self.keypair)
self.tester.local("rm " + self.keypair.name + ".pem")
shutil.rmtree(self.tester.credpath)
def runSysCmd(self, cmd):
self.source = "source " + self.tester.credpath + "/eucarc && "
self.out = self.tester.sys(self.source + cmd)
def doAuth(self):
self.keypair = self.tester.add_keypair()
self.group = self.tester.add_group()
self.tester.authorize_group(self.group)
def test(self):
# deregister cluster
self.runSysCmd("/opt/eucalyptus/usr/sbin/euca_conf --deregister-cluster --partition "
+ self.cc.partition + " --host " + self.cc.hostname + " --component " + self.cc.name)
# register cluster
self.runSysCmd("/opt/eucalyptus/usr/sbin/euca_conf --register-cluster --partition "
+ self.cc.partition + " --host " + self.cc.hostname + " --component " + self.cc.name + '_TEST' )
# Sleep for 10 seconds while cluster Enables
print self.STARTC + " Sleep for 10 seconds while cluster Enables. " + self.ENDC
self.tester.sleep(10)
# Make sure newly registered cluster with a different name is ENABLED
try :
check_cc = self.tester.service_manager.get_all_cluster_controllers(hostname=self.cc.hostname,
state="ENABLED", use_cached_list=False)[0]
print self.STARTC + "Success " + str(check_cc.name) + " ENABLED " + self.ENDC
pass
except Exception, e:
self.fail("Renamed cluster not enabled!")
if __name__ == "__main__":
unittest.main() | shaon/eutester | testcases/cloud_admin/4-0/euca8906.py | Python | bsd-2-clause | 3,119 |
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# mapping from protocol to python class. The protocol matches the string
# used by vttablet as a -binlog_player_protocol parameter.
update_stream_conn_classes = dict()
def register_conn_class(protocol, c):
"""Used by implementations to register themselves.
Args:
protocol: short string to document the protocol.
c: class to register.
"""
update_stream_conn_classes[protocol] = c
def connect(protocol, *pargs, **kargs):
"""Return a dialed UpdateStreamConnection to an update stream server.
Args:
protocol: The registered protocol to use.
*pargs: Passed to the registered protocol __init__ method.
**kargs: Passed to the registered protocol __init__ method.
Returns:
A dialed UpdateStreamConnection.
Raises:
ValueError: On bad protocol.
"""
if protocol not in update_stream_conn_classes:
raise ValueError('Unknown update stream protocol', protocol)
conn = update_stream_conn_classes[protocol](*pargs, **kargs)
conn.dial()
return conn
class StreamEvent(object):
"""StreamEvent describes a single event in the update stream.
Eventually we will use the proto3 definition object.
"""
ERR = 0
DML = 1
DDL = 2
POS = 3
def __init__(self, category, table_name, fields, rows, sql, timestamp,
transaction_id):
self.category = category
self.table_name = table_name
self.fields = fields
self.rows = rows
self.sql = sql
self.timestamp = timestamp
self.transaction_id = transaction_id
class UpdateStreamConnection(object):
"""The interface for the update stream client implementations.
All implementations must implement all these methods. If something
goes wrong with the connection, this object will be thrown out.
"""
def __init__(self, addr, timeout):
"""Initialize an update stream connection.
Args:
addr: server address. Can be protocol dependent.
timeout: connection timeout (float, in seconds).
"""
pass
def dial(self):
"""Dial to the server. If successful, call close() to close the connection.
"""
pass
def close(self):
"""Close the connection. This object may be re-used again by calling dial().
"""
pass
def is_closed(self):
"""Checks the connection status.
Returns:
True if this connection is closed.
"""
pass
def stream_update(self, position, timeout=3600.0):
"""Generator method to stream the updates from a given replication point.
Args:
position: Starting position to stream from.
timeout: Should stop streaming after we reach this timeout.
Returns:
This is a generator method that yields StreamEvent objects.
"""
pass
| danielmt/vshard | vendor/github.com/youtube/vitess/py/vtdb/update_stream.py | Python | mit | 2,847 |
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource
from userprofile.models import UserProfile
class UserResource(ModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'user'
fields = ['id','username']
allowed_methods = ['get']
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
queryset = UserProfile.objects.all()
resource_name = 'userprofile' | peasnrice/pamplemousse | userprofile/api.py | Python | mit | 542 |
#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django import forms
from django.utils.translation import gettext_lazy as _
class TOSForm(forms.Form):
confirm = forms.BooleanField(
label=_("I agree with the Terms of Service document"), required=True
)
next = forms.CharField(required=False, widget=forms.HiddenInput)
| phw/weblate | weblate/legal/forms.py | Python | gpl-3.0 | 1,055 |
import urllib.request
import simplejson
from book import Book # right? it's in this package
__author__ = 'braxton'
class Lookup(object):
def __init__(self):
self.lookup_url = "http://openlibrary.org/api/books?bibkeys="
self.search_url = "http://openlibrary.org/search.json?"
def _get_publisher_from_json_dict(self, data):
"""
Get the publisher data from a JSON dict.
@param data: a JSON dictionary
@return: the author
"""
return data.get('name')
def _get_author_from_json_dict(self, data):
"""
Get the author data from a JSON dict.
@param data: a JSON dictionary
@return: the author
"""
return data.get('name', None)
def _get_book_from_json_dict(self, data):
"""
Create a new Book instance based on a JSON dict.
@param data: a JSON dictionary
@return: a new Book instance (sans ISBN)
"""
publishers = ', '.join([self._get_publisher_from_json_dict(p) for p in data['publishers']])
authors = ', '.join([self._get_author_from_json_dict(a) for a in data['authors']])
book = Book(0) # better to create an object, even if there's no valid barcode yet
book.title = data.get('title', None)
book.publisher = publishers
book.authors = authors
book.pages = data.get('number_of_pages', None) # might cause issue, be careful.
book.publ_year = data.get('publish_date', None)
book.description = data.get('excerpts', None)
return book
@classmethod
def choose_item(cls, items, choice):
"""
Choose a book from the list returned from searching.
@param items: the list of items to choose from
@param choice: the choice to pull and format
@return: the selected item formatted from the list
"""
for book in items.get('items', []):
if book['id'] == choice:
return cls._get_book_from_json_dict(book)
def by_isbn(self, isbn):
"""
Search for one book on OpenLibrary by ISBN
@param isbn: the book's ISBN to retrieve
@return: a dict containing data from that book.
"""
if len(isbn) != 10 and len(isbn) != 13:
raise ValueError
url = urllib.request.urlopen(self.lookup_url+"ISBN"+":%s&jscmd=data&format=json" % isbn)
data = simplejson.load(url)['%s:%s' % ("ISBN", isbn)]
book = self._get_book_from_json_dict(data)
book.isbn = isbn
return book
def by_title(self, title):
"""
Search for a book on OpenLibrary by title
@param title: the title to search for
@return: the raw data of all results
"""
title = title.replace(' ', '+').lower()
url = urllib.request.urlopen(self.search_url+'title='+title)
data = simplejson.load(url)['docs']
for result in data:
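            # Build a Book for each search hit; fall back to defaults when
            # fields are missing.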
book = Book(0)
book.title = result['title']
try:
                book.authors = ', '.join(result['author_name']) if isinstance(result['author_name'], list) else result['author_name']
except KeyError:
book.authors = "None"
try:
book.publisher = ', '.join(result['publisher']) if isinstance(result['publisher'], list) else result['publisher']
except KeyError:
book.publisher = "No publisher found."
try:
book.publ_year = result['first_publish_year']
except KeyError:
book.publ_year = 0
try:
book.description = ''.join(result['first_sentence'])
except KeyError:
book.description = "No description found."
yield book | bjschafer/pyberry | Pyberry/com/bjschafer/pyberry/lookup.py | Python | gpl-3.0 | 3,861 |
<<<<<<< HEAD
<<<<<<< HEAD
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest, TESTFN, unlink, check_warnings,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._set_uname(('',)*5)
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
self._added_envvars = []
self._changed_envvars = []
for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
if var in os.environ:
self._changed_envvars.append((var, os.environ[var]))
else:
self._added_envvars.append(var)
def tearDown(self):
sys.path[:] = self.sys_path
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = self._config_vars[0]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self._config_vars[1])
for var, value in self._changed_envvars:
os.environ[var] = value
for var in self._added_envvars:
os.environ.pop(var, None)
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = os.uname_result(uname)
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = sorted(wanted.items())
scheme = sorted(scheme.items())
self.assertEqual(scheme, wanted)
def test_get_path(self):
# XXX make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxsize = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxsize = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3' % arch)
self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEqual(get_scheme_names(), wanted)
@skip_unless_symlink
def test_symlink(self):
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["PATH"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["PATH"])
# Issue 7880
def get(python):
cmd = [python, '-c',
'import sysconfig; print(sysconfig.get_platform())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.base_prefix != sys.base_exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
base = base.replace(sys.exec_prefix, sys.base_prefix)
elif sys.base_prefix != sys.prefix:
# virtual environment? Likewise, we have to adapt the paths
# before comparing
global_path = global_path.replace(sys.base_prefix, sys.prefix)
base = base.replace(sys.base_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
def test_main(self):
# just making sure _main() runs and returns things in the stdout
with captured_stdout() as output:
_main()
self.assertTrue(len(output.getvalue().split('\n')) > 0)
@unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
def test_ldshared_value(self):
ldflags = sysconfig.get_config_var('LDFLAGS')
ldshared = sysconfig.get_config_var('LDSHARED')
self.assertIn(ldflags, ldshared)
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_srcdir(self):
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
self.assertTrue(os.path.isabs(srcdir), srcdir)
self.assertTrue(os.path.isdir(srcdir), srcdir)
if sysconfig._PYTHON_BUILD:
# The python executable has not been installed so srcdir
# should be a full source checkout.
Python_h = os.path.join(srcdir, 'Include', 'Python.h')
self.assertTrue(os.path.exists(Python_h), Python_h)
self.assertTrue(sysconfig._is_python_source_dir(srcdir))
elif os.name == 'posix':
makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
# Issue #19340: srcdir has been realpath'ed already
makefile_dir = os.path.realpath(makefile_dir)
self.assertEqual(makefile_dir, srcdir)
def test_srcdir_independent_of_cwd(self):
# srcdir should be independent of the current working directory
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
cwd = os.getcwd()
try:
os.chdir('..')
srcdir2 = sysconfig.get_config_var('srcdir')
finally:
os.chdir(cwd)
self.assertEqual(srcdir, srcdir2)
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_deprecation(self):
self.assertWarns(DeprecationWarning,
sysconfig.get_config_var, 'SO')
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_value(self):
with check_warnings(('', DeprecationWarning)):
self.assertEqual(sysconfig.get_config_var('SO'),
sysconfig.get_config_var('EXT_SUFFIX'))
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_in_vars(self):
vars = sysconfig.get_config_vars()
self.assertIsNotNone(vars['SO'])
self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
class MakefileTests(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
def test_parse_makefile(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as makefile:
print("var1=a$(VAR2)", file=makefile)
print("VAR2=b$(var3)", file=makefile)
print("var3=42", file=makefile)
print("var4=$/invalid", file=makefile)
print("var5=dollar$$5", file=makefile)
vars = sysconfig._parse_makefile(TESTFN)
self.assertEqual(vars, {
'var1': 'ab42',
'VAR2': 'b42',
'var3': 42,
'var4': '$/invalid',
'var5': 'dollar$5',
})
def test_main():
run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
test_main()
=======
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest, TESTFN, unlink, check_warnings,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._set_uname(('',)*5)
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
self._added_envvars = []
self._changed_envvars = []
for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
if var in os.environ:
self._changed_envvars.append((var, os.environ[var]))
else:
self._added_envvars.append(var)
def tearDown(self):
sys.path[:] = self.sys_path
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = self._config_vars[0]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self._config_vars[1])
for var, value in self._changed_envvars:
os.environ[var] = value
for var in self._added_envvars:
os.environ.pop(var, None)
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = os.uname_result(uname)
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = sorted(wanted.items())
scheme = sorted(scheme.items())
self.assertEqual(scheme, wanted)
def test_get_path(self):
# XXX make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxsize = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxsize = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3' % arch)
self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEqual(get_scheme_names(), wanted)
@skip_unless_symlink
def test_symlink(self):
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["PATH"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["PATH"])
# Issue 7880
def get(python):
cmd = [python, '-c',
'import sysconfig; print(sysconfig.get_platform())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.base_prefix != sys.base_exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
base = base.replace(sys.exec_prefix, sys.base_prefix)
elif sys.base_prefix != sys.prefix:
# virtual environment? Likewise, we have to adapt the paths
# before comparing
global_path = global_path.replace(sys.base_prefix, sys.prefix)
base = base.replace(sys.base_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
def test_main(self):
# just making sure _main() runs and returns things in the stdout
with captured_stdout() as output:
_main()
self.assertTrue(len(output.getvalue().split('\n')) > 0)
@unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
def test_ldshared_value(self):
ldflags = sysconfig.get_config_var('LDFLAGS')
ldshared = sysconfig.get_config_var('LDSHARED')
self.assertIn(ldflags, ldshared)
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_srcdir(self):
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
self.assertTrue(os.path.isabs(srcdir), srcdir)
self.assertTrue(os.path.isdir(srcdir), srcdir)
if sysconfig._PYTHON_BUILD:
# The python executable has not been installed so srcdir
# should be a full source checkout.
Python_h = os.path.join(srcdir, 'Include', 'Python.h')
self.assertTrue(os.path.exists(Python_h), Python_h)
self.assertTrue(sysconfig._is_python_source_dir(srcdir))
elif os.name == 'posix':
makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
# Issue #19340: srcdir has been realpath'ed already
makefile_dir = os.path.realpath(makefile_dir)
self.assertEqual(makefile_dir, srcdir)
def test_srcdir_independent_of_cwd(self):
# srcdir should be independent of the current working directory
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
cwd = os.getcwd()
try:
os.chdir('..')
srcdir2 = sysconfig.get_config_var('srcdir')
finally:
os.chdir(cwd)
self.assertEqual(srcdir, srcdir2)
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_deprecation(self):
self.assertWarns(DeprecationWarning,
sysconfig.get_config_var, 'SO')
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_value(self):
with check_warnings(('', DeprecationWarning)):
self.assertEqual(sysconfig.get_config_var('SO'),
sysconfig.get_config_var('EXT_SUFFIX'))
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_in_vars(self):
vars = sysconfig.get_config_vars()
self.assertIsNotNone(vars['SO'])
self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
class MakefileTests(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
def test_parse_makefile(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as makefile:
print("var1=a$(VAR2)", file=makefile)
print("VAR2=b$(var3)", file=makefile)
print("var3=42", file=makefile)
print("var4=$/invalid", file=makefile)
print("var5=dollar$$5", file=makefile)
vars = sysconfig._parse_makefile(TESTFN)
self.assertEqual(vars, {
'var1': 'ab42',
'VAR2': 'b42',
'var3': 42,
'var4': '$/invalid',
'var5': 'dollar$5',
})
def test_main():
run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
test_main()
import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest, TESTFN, unlink, check_warnings,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._set_uname(('',)*5)
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
self._added_envvars = []
self._changed_envvars = []
for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
if var in os.environ:
self._changed_envvars.append((var, os.environ[var]))
else:
self._added_envvars.append(var)
def tearDown(self):
sys.path[:] = self.sys_path
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = self._config_vars[0]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self._config_vars[1])
for var, value in self._changed_envvars:
os.environ[var] = value
for var in self._added_envvars:
os.environ.pop(var, None)
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = os.uname_result(uname)
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = sorted(wanted.items())
scheme = sorted(scheme.items())
self.assertEqual(scheme, wanted)
def test_get_path(self):
# XXX make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxsize = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxsize = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3' % arch)
self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEqual(get_scheme_names(), wanted)
@skip_unless_symlink
def test_symlink(self):
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["PATH"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["PATH"])
# Issue 7880
def get(python):
cmd = [python, '-c',
'import sysconfig; print(sysconfig.get_platform())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.base_prefix != sys.base_exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
base = base.replace(sys.exec_prefix, sys.base_prefix)
elif sys.base_prefix != sys.prefix:
# virtual environment? Likewise, we have to adapt the paths
# before comparing
global_path = global_path.replace(sys.base_prefix, sys.prefix)
base = base.replace(sys.base_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
def test_main(self):
# just making sure _main() runs and returns things in the stdout
with captured_stdout() as output:
_main()
self.assertTrue(len(output.getvalue().split('\n')) > 0)
@unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
def test_ldshared_value(self):
ldflags = sysconfig.get_config_var('LDFLAGS')
ldshared = sysconfig.get_config_var('LDSHARED')
self.assertIn(ldflags, ldshared)
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_srcdir(self):
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
self.assertTrue(os.path.isabs(srcdir), srcdir)
self.assertTrue(os.path.isdir(srcdir), srcdir)
if sysconfig._PYTHON_BUILD:
# The python executable has not been installed so srcdir
# should be a full source checkout.
Python_h = os.path.join(srcdir, 'Include', 'Python.h')
self.assertTrue(os.path.exists(Python_h), Python_h)
self.assertTrue(sysconfig._is_python_source_dir(srcdir))
elif os.name == 'posix':
makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
# Issue #19340: srcdir has been realpath'ed already
makefile_dir = os.path.realpath(makefile_dir)
self.assertEqual(makefile_dir, srcdir)
def test_srcdir_independent_of_cwd(self):
# srcdir should be independent of the current working directory
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
cwd = os.getcwd()
try:
os.chdir('..')
srcdir2 = sysconfig.get_config_var('srcdir')
finally:
os.chdir(cwd)
self.assertEqual(srcdir, srcdir2)
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_deprecation(self):
self.assertWarns(DeprecationWarning,
sysconfig.get_config_var, 'SO')
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_value(self):
with check_warnings(('', DeprecationWarning)):
self.assertEqual(sysconfig.get_config_var('SO'),
sysconfig.get_config_var('EXT_SUFFIX'))
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_in_vars(self):
vars = sysconfig.get_config_vars()
self.assertIsNotNone(vars['SO'])
self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
class MakefileTests(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
def test_parse_makefile(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as makefile:
print("var1=a$(VAR2)", file=makefile)
print("VAR2=b$(var3)", file=makefile)
print("var3=42", file=makefile)
print("var4=$/invalid", file=makefile)
print("var5=dollar$$5", file=makefile)
vars = sysconfig._parse_makefile(TESTFN)
self.assertEqual(vars, {
'var1': 'ab42',
'VAR2': 'b42',
'var3': 42,
'var4': '$/invalid',
'var5': 'dollar$5',
})
def test_main():
run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
test_main()
| ArcherSys/ArcherSys | Lib/test/test_sysconfig.py | Python | mit | 50,780 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.project_info.tasks.dependencies import Dependencies
from pants.backend.project_info.tasks.depmap import Depmap
from pants.backend.project_info.tasks.eclipse_gen import EclipseGen
from pants.backend.project_info.tasks.ensime_gen import EnsimeGen
from pants.backend.project_info.tasks.export import Export
from pants.backend.project_info.tasks.filedeps import FileDeps
from pants.backend.project_info.tasks.idea_gen import IdeaGen
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
pass
# TODO https://github.com/pantsbuild/pants/issues/604 register_goals
def register_goals():
# IDE support.
task(name='idea', action=IdeaGen).install()
task(name='eclipse', action=EclipseGen).install()
task(name='ensime', action=EnsimeGen).install()
task(name='export', action=Export).install()
task(name='depmap', action=Depmap).install()
task(name='dependencies', action=Dependencies).install()
task(name='filedeps', action=FileDeps).install('filedeps')
| jtrobec/pants | src/python/pants/backend/project_info/register.py | Python | apache-2.0 | 1,316 |
import matplotlib.pylab as plt
import numpy as np
fig1 = plt.figure(figsize=(8,6))
ax = fig1.add_subplot(111)
ax.plot(np.arange(0,100), np.arange(0,100), 'b-', linewidth =2)
plt.tight_layout()
plt.show()
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_git_credentials_dialog.py
'''
from PyQt5 import QtWidgets
import wb_dialog_bases
class WbGitCredentialsDialog(wb_dialog_bases.WbDialog):
def __init__( self, app, parent ):
self.app = app
super().__init__( parent )
self.setWindowTitle( T_('Git Credentials - %s') % (' '.join( app.app_name_parts ),) )
self.url = QtWidgets.QLabel()
self.username = QtWidgets.QLineEdit()
self.password = QtWidgets.QLineEdit()
self.password.setEchoMode( self.password.Password )
self.username.textChanged.connect( self.nameTextChanged )
self.password.textChanged.connect( self.nameTextChanged )
em = self.fontMetrics().width( 'M' )
self.addRow( T_('URL'), self.url )
self.addRow( T_('Username'), self.username, min_width=50*em )
self.addRow( T_('Password'), self.password )
self.addButtons()
def nameTextChanged( self, text ):
self.ok_button.setEnabled( self.getUsername() != '' and self.getPassword() != '' )
def setFields( self, url, username=None ):
self.url.setText( url )
if username is not None:
self.username.setReadOnly( True )
self.username.setText( username )
self.password.setFocus()
else:
self.username.setFocus()
def getUsername( self ):
return self.username.text().strip()
def getPassword( self ):
return self.password.text().strip()
if __name__ == '__main__':
def T_(s):
return s
def S_(s, p, n):
if n == 1:
return s
else:
return p
app = QtWidgets.QApplication( ['foo'] )
cred = WbGitCredentialsDialog( None, None )
cred.setFields( 'http://fred.com/foo', 'bob' )
if cred.exec_():
print( cred.getUsername() )
print( cred.getPassword() )
del cred
del app
| barry-scott/git-workbench | Source/Git/wb_git_credentials_dialog.py | Python | apache-2.0 | 2,228 |
'''
Created on Aug 26, 2014
@author: preethi
'''
import os
import sys
import shutil
sys.path.insert(0,os.path.abspath(os.path.dirname(__file__) + '/' + '../..')) #trick to make it run from CLI
import unittest
import pydot
from jnpr.openclos.model import Device, InterfaceDefinition
from jnpr.openclos.writer import ConfigWriter, CablingPlanWriter
from test_model import createPod, createPodDevice
from test_dao import InMemoryDao
class TestWriterBase(unittest.TestCase):
def setUp(self):
self._conf = {}
self._conf['outputDir'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'out')
self._conf['DOT'] = {'ranksep' : '5 equally', 'colors': ['red', 'green', 'blue']}
self._conf['deviceFamily'] = {
"qfx-5100-24q-2p": {
"ports": 'et-0/0/[0-23]'
},
"qfx-5100-48s-6q": {
"uplinkPorts": 'et-0/0/[48-53]',
"downlinkPorts": 'xe-0/0/[0-47]'
}
}
self._dao = InMemoryDao.getInstance()
''' Deletes 'out' folder under test dir'''
shutil.rmtree(self._conf['outputDir'], ignore_errors=True)
def tearDown(self):
''' Deletes 'out' folder under test dir'''
shutil.rmtree(self._conf['outputDir'], ignore_errors=True)
InMemoryDao._destroy()
class TestConfigWriter(TestWriterBase):
def testWriteConfigInFile(self):
from jnpr.openclos.model import DeviceConfig
self._conf['writeConfigInFile'] = True
with self._dao.getReadWriteSession() as session:
pod = createPod('pod1', session)
device = Device('test_device', "",'admin', 'admin', 'spine', "", "", pod)
device.config = DeviceConfig(device.id, "dummy config")
configWriter = ConfigWriter(self._conf, pod, self._dao)
configWriter.write(device)
self.assertTrue(os.path.exists(os.path.join(configWriter.outputDir, device.id+'__test_device.conf')))
class TestCablingPlanWriter(TestWriterBase):
def testInitWithTemplate(self):
from jinja2 import TemplateNotFound
with self._dao.getReadWriteSession() as session:
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self._conf, pod, self._dao)
self.assertIsNotNone(cablingPlanWriter.template)
with self.assertRaises(TemplateNotFound) as e:
cablingPlanWriter.templateEnv.get_template('unknown-template')
self.assertTrue('unknown-template' in e.exception.message)
def testCreateDeviceInGraph(self):
testDeviceTopology = pydot.Dot(graph_type='graph', )
with self._dao.getReadWriteSession() as session:
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self._conf, pod, self._dao)
device = createPodDevice(session, 'Preethi', pod)
device.id = 'preethi-1'
cablingPlanWriter.createDeviceInGraph(device.name, device, testDeviceTopology)
path = cablingPlanWriter.outputDir + '/testDevicelabel.dot'
testDeviceTopology.write_raw(path)
data = open(path, 'r').read()
#check the generated label for device
self.assertTrue('"preethi-1"' in data and 'label=Preethi' in data)
def testcreateLinksInGraph(self):
testLinksInTopology = pydot.Dot(graph_type='graph')
with self._dao.getReadWriteSession() as session:
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self._conf, pod, self._dao)
deviceOne = Device('spine01','qfx-5100-24q-2p', 'admin', 'admin', 'spine', "", "", pod)
deviceOne.id = 'spine01'
IF1 = InterfaceDefinition('IF1', deviceOne, 'downlink')
IF1.id = 'IF1'
deviceTwo = Device('leaf01','qfx-5100-48s-6q', 'admin', 'admin', 'leaf', "", "", pod)
deviceTwo.id = 'leaf01'
IF21 = InterfaceDefinition('IF1', deviceTwo, 'uplink')
IF21.id = 'IF21'
IF1.peer = IF21
IF21.peer = IF1
linkLabel = {deviceOne.id + ':' + IF1.id : deviceTwo.id + ':' + IF21.id}
cablingPlanWriter.createLinksInGraph(linkLabel, testLinksInTopology, 'red')
path = cablingPlanWriter.outputDir + '/testLinklabel.dot'
testLinksInTopology.write_raw(path)
data = open(path, 'r').read()
#check generated label for links
self.assertTrue('spine01:IF1 -- leaf01:IF21 [color=red];' in data)
def testcreateDOTFile(self):
# create pod
# create device
#create interface
with self._dao.getReadWriteSession() as session:
pod = createPod('pod1', session)
cablingPlanWriter = CablingPlanWriter(self._conf, pod, self._dao)
#check the DOT file is generated
cablingPlanWriter.writeDOT()
data = open(cablingPlanWriter.outputDir + '/cablingPlan.dot', 'r').read()
#check generated label for links
self.assertTrue('splines=polyline;' in data)
| yunli2004/OpenClos | jnpr/openclos/tests/unit/test_writer.py | Python | apache-2.0 | 5,203 |
import sys
import time
import yappi
def generate_func(func_name, code):
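    # Build a function object from source text with exec() and register it in
    # globals() so the generated call chains can resolve each other by name.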
code = """def {0}(*args, **kwargs): {1}""".format(func_name, code)
exec(code, globals(), locals())
func = locals()[func_name]
globals()[func.__name__] = func
return func
print("Generating functions...")
FUNC_COUNT = int(sys.argv[1])
MAX_STACK_DEPTH = int(sys.argv[2])
top_level_funcs = []
# todo: generate functions that are N stack depth
for i in range(FUNC_COUNT):
func = generate_func('func_{0}'.format(i), "pass")
for k in range(MAX_STACK_DEPTH):
func = generate_func('func_{0}_{1}'.format(i, k), func.__name__ + '()')
top_level_funcs.append(func)
#print(globals())
print("Calling functions...")
t0 = time.time()
#yappi.start()
for f in top_level_funcs:
f(i)
print("Elapsed %0.6f secs" % (time.time() - t0))
#yappi.get_func_stats().print_all()
| sumerc/yappi | tests/manual/_test_performance2.py | Python | mit | 872 |
from modules.module import Module
import random
class DiceRoller(Module):
def __init__(self, client):
Module.__init__(self, 'DiceRoller', client)
def get_commands(self):
return {'!roll': self.roll_dice}
async def roll_dice(self, message, content):
minval = 1
maxval = 100
num_dice = 1
name = message.author.name
if len(content) > 1:
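            # expected argument format: "<sides>" or "<sides>x<count>", e.g. "!roll 20x3"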
args = content[1].split('x')
try:
maxval = int(args[0])
if maxval < 1:
return
            except (TypeError, ValueError):
return
if len(args) > 1:
try:
num_dice = int(args[1])
if num_dice < 1:
return
elif num_dice > 20:
responses = ['I refuse to roll this many dice.',
'I don\'t have time for this.',
'No.',
'Absolutely not.',
'Don\'t.',
'Stop.',
'Please reconsider.',
'Nope.',
'ERROR: EXCESSIVE_DICE_DETECTED']
await self.client.send_message(message.channel, random.choice(responses))
return
                except (TypeError, ValueError):
return
results = []
for i in range(0, num_dice):
val = random.randint(minval, maxval)
results.append(val)
response = ''
if num_dice == 1:
response = '%s has rolled a **%d**.' % (name, results[0])
else:
liststr = ', '.join(map(lambda x : str(x), results))
response = '%s has rolled a **%d**. (%s)' % (name, sum(results), liststr)
await self.client.send_message(message.channel, response)
| chudooder/BotDooder | modules/diceroller.py | Python | gpl-3.0 | 1,938 |
# -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test utils."""
import os
from datetime import datetime
from rero_ils.modules.patron_types.api import PatronType
from rero_ils.modules.patrons.api import Patron
from rero_ils.modules.utils import add_years, extracted_data_from_ref, \
get_endpoint_configuration, get_schema_for_resource, read_json_record
from rero_ils.utils import get_current_language, language_iso639_2to1, \
language_mapping, unique_list
def test_unique_list():
"""Test unicity of list."""
list = ['python', 'snail', 'python', 'snail']
assert ['python', 'snail'] == unique_list(list)
def test_read_json_record(request):
"""Test IlsRecord PID after validation failed"""
file_name = os.path.join(request.fspath.dirname, '..', 'data',
'documents.json')
with open(file_name) as json_file:
count = 0
for record in read_json_record(json_file):
count += 1
assert record.get('pid') == str(count)
assert count == 2
def test_add_years():
"""Test adding years to a date."""
initial_date = datetime.now()
one_year_later = add_years(initial_date, 1)
assert initial_date.year == one_year_later.year - 1
initial_date = datetime.strptime('2020-02-29', '%Y-%m-%d')
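    # 2020-02-29 has no anniversary in the non-leap year 2022, so add_years is
    # expected to fall back to March 1st; 2024 is a leap year, so the date is kept.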
    two_years_later = add_years(initial_date, 2)
    four_years_later = add_years(initial_date, 4)
    assert two_years_later.month == 3 and two_years_later.day == 1
assert four_years_later.month == initial_date.month and \
four_years_later.day == initial_date.day
def test_get_schema_for_resources(app):
"""Test get_schemas_for_resource function."""
json_schema = 'https://bib.rero.ch/schemas/patrons/patron-v0.0.1.json'
assert get_schema_for_resource(Patron) == json_schema
assert get_schema_for_resource('ptrn') == json_schema
def test_get_endpoint_configuration(app):
"""Test get_endpoint_configuration."""
assert get_endpoint_configuration('loc')['pid_type'] == 'loc'
assert get_endpoint_configuration('locations')['pid_type'] == 'loc'
assert get_endpoint_configuration(PatronType)['pid_type'] == 'ptty'
assert get_endpoint_configuration('dummy') is None
def test_extract_data_from_ref(app, patron_sion_data,
patron_type_grown_sion):
"""Test extract_data_from_ref."""
# Check real data
ptty = patron_sion_data['patron']['type']
assert extracted_data_from_ref(ptty, data='pid') == 'ptty4'
assert extracted_data_from_ref(ptty, data='resource') == 'patron_types'
assert extracted_data_from_ref(ptty, data='record_class') == PatronType
ptty_record = extracted_data_from_ref(ptty, data='record')
assert ptty_record.pid == patron_type_grown_sion.pid
assert extracted_data_from_ref(ptty, data='es_record')['pid'] == 'ptty4'
# check dummy data
assert extracted_data_from_ref('dummy_data', data='pid') is None
assert extracted_data_from_ref('dummy_data', data='resource') is None
assert extracted_data_from_ref('dummy_data', data='record_class') is None
assert extracted_data_from_ref('dummy_data', data='record') is None
assert extracted_data_from_ref(ptty, data='dummy') is None
assert extracted_data_from_ref('dummy_data', data='es_record') is None
def test_current_language(app):
"""Test current language."""
# Just test this function return otherwise than None
assert get_current_language()
def test_language_iso639_2to1(app):
"""Test convert MARC language code to language."""
assert language_iso639_2to1('eng') == 'en'
assert language_iso639_2to1('fre') == 'fr'
assert language_iso639_2to1('ger') == 'de'
assert language_iso639_2to1('ita') == 'it'
# default language
assert language_iso639_2to1('rus') == 'en'
def test_language_mapping(app):
"""Test language mapping."""
assert 'fre' == language_mapping('fre')
assert 'dut' == language_mapping('dum')
| rero/reroils-app | tests/unit/test_utils.py | Python | gpl-2.0 | 4,590 |
class Solution:
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
if target >= letters[-1] or target < letters[0]: return letters[0]
for i in range(1, len(letters)):
if target < letters[i]:
return letters[i] | YiqunPeng/Leetcode-pyq | solutions/744FindSmallestLetterGreaterThanTarget.py | Python | gpl-3.0 | 364 |
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of generic PDF objects (dictionary, number, string, and so on)
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
import re
from utils import readNonWhitespace, RC4_encrypt
import filters
import utils
import decimal
import codecs
def readObject(stream, pdf):
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
if tok == 't' or tok == 'f':
# boolean object
return BooleanObject.readFromStream(stream)
elif tok == '(':
# string object
return readStringFromStream(stream)
elif tok == '/':
# name object
return NameObject.readFromStream(stream)
elif tok == '[':
# array object
return ArrayObject.readFromStream(stream, pdf)
elif tok == 'n':
# null object
return NullObject.readFromStream(stream)
elif tok == '<':
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
if peek == '<<':
return DictionaryObject.readFromStream(stream, pdf)
else:
return readHexStringFromStream(stream)
elif tok == '%':
# comment
while tok not in ('\r', '\n'):
tok = stream.read(1)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
return readObject(stream, pdf)
else:
# number object OR indirect reference
if tok == '+' or tok == '-':
# number
return NumberObject.readFromStream(stream)
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
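        # an indirect reference looks like "12 0 R"; anything else here is a plain number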
if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) != None:
return IndirectObject.readFromStream(stream, pdf)
else:
return NumberObject.readFromStream(stream)
class PdfObject(object):
def getObject(self):
"""Resolves indirect references."""
return self
class NullObject(PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("null")
def readFromStream(stream):
nulltxt = stream.read(4)
if nulltxt != "null":
            raise utils.PdfReadError, "error reading null object"
        return NullObject()
    readFromStream = staticmethod(readFromStream)
class BooleanObject(PdfObject):
def __init__(self, value):
self.value = value
def writeToStream(self, stream, encryption_key):
if self.value:
stream.write("true")
else:
stream.write("false")
def readFromStream(stream):
word = stream.read(4)
if word == "true":
return BooleanObject(True)
elif word == "fals":
stream.read(1)
return BooleanObject(False)
assert False
readFromStream = staticmethod(readFromStream)
class ArrayObject(list, PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("[")
for data in self:
stream.write(" ")
data.writeToStream(stream, encryption_key)
stream.write(" ]")
    def readFromStream(stream, pdf):
        arr = ArrayObject()
        tmp = stream.read(1)
if tmp != "[":
raise utils.PdfReadError, "error reading array"
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
stream.seek(-1, 1)
# check for array ending
peekahead = stream.read(1)
if peekahead == "]":
break
stream.seek(-1, 1)
# read and append obj
arr.append(readObject(stream, pdf))
return arr
readFromStream = staticmethod(readFromStream)
class IndirectObject(PdfObject):
def __init__(self, idnum, generation, pdf):
self.idnum = idnum
self.generation = generation
self.pdf = pdf
def getObject(self):
return self.pdf.getObject(self).getObject()
def __repr__(self):
return "IndirectObject(%r, %r)" % (self.idnum, self.generation)
def __eq__(self, other):
return (
other != None and
isinstance(other, IndirectObject) and
self.idnum == other.idnum and
self.generation == other.generation and
self.pdf is other.pdf
)
def __ne__(self, other):
return not self.__eq__(other)
def writeToStream(self, stream, encryption_key):
stream.write("%s %s R" % (self.idnum, self.generation))
def readFromStream(stream, pdf):
idnum = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
idnum += tok
generation = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
generation += tok
r = stream.read(1)
if r != "R":
raise utils.PdfReadError("error reading indirect object reference")
return IndirectObject(int(idnum), int(generation), pdf)
readFromStream = staticmethod(readFromStream)
class FloatObject(decimal.Decimal, PdfObject):
def __new__(cls, value="0", context=None):
return decimal.Decimal.__new__(cls, str(value), context)
def __repr__(self):
return str(self)
def writeToStream(self, stream, encryption_key):
stream.write(str(self))
class NumberObject(int, PdfObject):
def __init__(self, value):
int.__init__(self, value)
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
def readFromStream(stream):
name = ""
while True:
tok = stream.read(1)
if tok != '+' and tok != '-' and tok != '.' and not tok.isdigit():
stream.seek(-1, 1)
break
name += tok
if name.find(".") != -1:
return FloatObject(name)
else:
return NumberObject(name)
readFromStream = staticmethod(readFromStream)
##
# Given a string (either a "str" or "unicode"), create a ByteStringObject or a
# TextStringObject to represent the string.
def createStringObject(string):
if isinstance(string, unicode):
return TextStringObject(string)
elif isinstance(string, str):
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
try:
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
def readHexStringFromStream(stream):
stream.read(1)
txt = ""
x = ""
while True:
tok = readNonWhitespace(stream)
if tok == ">":
break
x += tok
if len(x) == 2:
txt += chr(int(x, base=16))
x = ""
if len(x) == 1:
x += "0"
if len(x) == 2:
txt += chr(int(x, base=16))
return createStringObject(txt)
def readStringFromStream(stream):
tok = stream.read(1)
parens = 1
txt = ""
while True:
tok = stream.read(1)
if tok == "(":
parens += 1
elif tok == ")":
parens -= 1
if parens == 0:
break
elif tok == "\\":
tok = stream.read(1)
if tok == "n":
tok = "\n"
elif tok == "r":
tok = "\r"
elif tok == "t":
tok = "\t"
elif tok == "b":
tok == "\b"
elif tok == "f":
tok = "\f"
elif tok == "(":
tok = "("
elif tok == ")":
tok = ")"
elif tok == "\\":
tok = "\\"
elif tok.isdigit():
tok += stream.read(2)
tok = chr(int(tok, base=8))
elif tok in "\n\r":
# This case is hit when a backslash followed by a line
# break occurs. If it's a multi-char EOL, consume the
# second character:
tok = stream.read(1)
if not tok in "\n\r":
stream.seek(-1, 1)
# Then don't add anything to the actual string, since this
# line break was escaped:
tok = ''
else:
raise utils.PdfReadError("Unexpected escaped string")
txt += tok
return createStringObject(txt)
##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object.
class ByteStringObject(str, PdfObject):
##
# For compatibility with TextStringObject.original_bytes. This method
# returns self.
original_bytes = property(lambda self: self)
def writeToStream(self, stream, encryption_key):
bytearr = self
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
stream.write("<")
stream.write(bytearr.encode("hex"))
stream.write(">")
##
# Represents a string object that has been decoded into a real unicode string.
# If read from a PDF document, this string appeared to match the
# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
# occur.
class TextStringObject(unicode, PdfObject):
autodetect_pdfdocencoding = False
autodetect_utf16 = False
##
# It is occasionally possible that a text string object gets created where
# a byte string object was expected due to the autodetection mechanism --
# if that occurs, this "original_bytes" property can be used to
# back-calculate what the original encoded bytes were.
original_bytes = property(lambda self: self.get_original_bytes())
def get_original_bytes(self):
# We're a text string object, but the library is trying to get our raw
# bytes. This can happen if we auto-detected this string as text, but
# we were wrong. It's pretty common. Return the original bytes that
# would have been used to create this object, based upon the autodetect
# method.
if self.autodetect_utf16:
return codecs.BOM_UTF16_BE + self.encode("utf-16be")
elif self.autodetect_pdfdocencoding:
return encode_pdfdocencoding(self)
else:
raise Exception("no information about original bytes")
def writeToStream(self, stream, encryption_key):
# Try to write the string out as a PDFDocEncoding encoded string. It's
# nicer to look at in the PDF file. Sadly, we take a performance hit
# here for trying...
try:
bytearr = encode_pdfdocencoding(self)
except UnicodeEncodeError:
bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
obj = ByteStringObject(bytearr)
obj.writeToStream(stream, None)
else:
stream.write("(")
for c in bytearr:
if not c.isalnum() and c != ' ':
stream.write("\\%03o" % ord(c))
else:
stream.write(c)
stream.write(")")
class NameObject(str, PdfObject):
delimiterCharacters = "(", ")", "<", ">", "[", "]", "{", "}", "/", "%"
def __init__(self, data):
str.__init__(self, data)
def writeToStream(self, stream, encryption_key):
stream.write(self)
def readFromStream(stream):
name = stream.read(1)
if name != "/":
raise utils.PdfReadError, "name read error"
while True:
tok = stream.read(1)
if tok.isspace() or tok in NameObject.delimiterCharacters:
stream.seek(-1, 1)
break
name += tok
return NameObject(name)
readFromStream = staticmethod(readFromStream)
class DictionaryObject(dict, PdfObject):
def __init__(self, *args, **kwargs):
if len(args) == 0:
self.update(kwargs)
elif len(args) == 1:
arr = args[0]
# If we're passed a list/tuple, make a dict out of it
if not hasattr(arr, "iteritems"):
newarr = {}
for k, v in arr:
newarr[k] = v
arr = newarr
self.update(arr)
else:
raise TypeError("dict expected at most 1 argument, got 3")
def update(self, arr):
# note, a ValueError halfway through copying values
# will leave half the values in this dict.
for k, v in arr.iteritems():
self.__setitem__(k, v)
def raw_get(self, key):
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key, value=None):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.setdefault(self, key, value)
def __getitem__(self, key):
return dict.__getitem__(self, key).getObject()
##
# Retrieves XMP (Extensible Metadata Platform) data relevant to the
# this object, if available.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
# @return Returns a {@link #xmp.XmpInformation XmlInformation} instance
# that can be used to access XMP metadata from the document. Can also
# return None if no metadata was found on the document root.
def getXmpMetadata(self):
metadata = self.get("/Metadata", None)
if metadata == None:
return None
metadata = metadata.getObject()
import xmp
if not isinstance(metadata, xmp.XmpInformation):
metadata = xmp.XmpInformation(metadata)
self[NameObject("/Metadata")] = metadata
return metadata
##
# Read-only property that accesses the {@link
# #DictionaryObject.getXmpData getXmpData} function.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
def writeToStream(self, stream, encryption_key):
stream.write("<<\n")
for key, value in self.items():
key.writeToStream(stream, encryption_key)
stream.write(" ")
value.writeToStream(stream, encryption_key)
stream.write("\n")
stream.write(">>")
def readFromStream(stream, pdf):
tmp = stream.read(2)
if tmp != "<<":
raise utils.PdfReadError, "dictionary read error"
data = {}
while True:
tok = readNonWhitespace(stream)
if tok == ">":
stream.read(1)
break
stream.seek(-1, 1)
key = readObject(stream, pdf)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
value = readObject(stream, pdf)
if key in data:
# multiple definitions of key not permitted
raise utils.PdfReadError, "multiple definitions in dictionary"
data[key] = value
pos = stream.tell()
s = readNonWhitespace(stream)
if s == 's' and stream.read(5) == 'tream':
eol = stream.read(1)
# odd PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == ' ':
eol = stream.read(1)
assert eol in ("\n", "\r")
if eol == "\r":
# read \n after
stream.read(1)
# this is a stream object, not a dictionary
assert "/Length" in data
length = data["/Length"]
if isinstance(length, IndirectObject):
t = stream.tell()
length = pdf.getObject(length)
stream.seek(t, 0)
data["__streamdata__"] = stream.read(length)
e = readNonWhitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != "endstream":
# (sigh) - the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users into PDF files tend to be our audience.
# we need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == "endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
else:
stream.seek(pos, 0)
raise utils.PdfReadError, "Unable to find 'endstream' marker after stream."
else:
stream.seek(pos, 0)
if "__streamdata__" in data:
return StreamObject.initializeFromDictionary(data)
        else:
            retval = DictionaryObject()
            retval.update(data)
return retval
readFromStream = staticmethod(readFromStream)
class StreamObject(DictionaryObject):
def __init__(self):
self._data = None
self.decodedSelf = None
def writeToStream(self, stream, encryption_key):
self[NameObject("/Length")] = NumberObject(len(self._data))
DictionaryObject.writeToStream(self, stream, encryption_key)
del self["/Length"]
stream.write("\nstream\n")
data = self._data
if encryption_key:
data = RC4_encrypt(encryption_key, data)
stream.write(data)
stream.write("\nendstream")
def initializeFromDictionary(data):
if "/Filter" in data:
else:
retval._data = data["__streamdata__"]
del data["__streamdata__"]
del data["/Length"]
retval.update(data)
return retval
initializeFromDictionary = staticmethod(initializeFromDictionary)
def flateEncode(self):
if "/Filter" in self:
f = self["/Filter"]
if isinstance(f, ArrayObject):
f.insert(0, NameObject("/FlateDecode"))
else:
newf = ArrayObject()
newf.append(NameObject("/FlateDecode"))
newf.append(f)
f = newf
else:
f = NameObject("/FlateDecode")
retval = EncodedStreamObject()
retval[NameObject("/Filter")] = f
retval._data = filters.FlateDecode.encode(self._data)
return retval
class DecodedStreamObject(StreamObject):
def getData(self):
return self._data
def setData(self, data):
self._data = data
class EncodedStreamObject(StreamObject):
def __init__(self):
self.decodedSelf = None
def getData(self):
if self.decodedSelf:
# cached version of decoded object
return self.decodedSelf.getData()
else:
# create decoded object
decoded = DecodedStreamObject()
decoded._data = filters.decodeStreamData(self)
for key, value in self.items():
if not key in ("/Length", "/Filter", "/DecodeParms"):
decoded[key] = value
self.decodedSelf = decoded
return decoded._data
def setData(self, data):
raise utils.PdfReadError, "Creating EncodedStreamObject is not currently supported"
class RectangleObject(ArrayObject):
def __init__(self, arr):
# must have four points
assert len(arr) == 4
# automatically convert arr[x] into NumberObject(arr[x]) if necessary
ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr])
def ensureIsNumber(self, value):
if not isinstance(value, (NumberObject, FloatObject)):
value = FloatObject(value)
return value
def __repr__(self):
return "RectangleObject(%s)" % repr(list(self))
def getLowerLeft_x(self):
return self[0]
def getLowerLeft_y(self):
return self[1]
def getUpperRight_x(self):
return self[2]
def getUpperRight_y(self):
return self[3]
def getUpperLeft_x(self):
return self.getLowerLeft_x()
def getUpperLeft_y(self):
return self.getUpperRight_y()
def getLowerRight_x(self):
return self.getUpperRight_x()
def getLowerRight_y(self):
return self.getLowerLeft_y()
def getLowerLeft(self):
return self.getLowerLeft_x(), self.getLowerLeft_y()
def getLowerRight(self):
return self.getLowerRight_x(), self.getLowerRight_y()
def getUpperLeft(self):
return self.getUpperLeft_x(), self.getUpperLeft_y()
def getUpperRight(self):
return self.getUpperRight_x(), self.getUpperRight_y()
def setLowerLeft(self, value):
self[0], self[1] = [self.ensureIsNumber(x) for x in value]
def setLowerRight(self, value):
self[2], self[1] = [self.ensureIsNumber(x) for x in value]
def setUpperLeft(self, value):
self[0], self[3] = [self.ensureIsNumber(x) for x in value]
def setUpperRight(self, value):
self[2], self[3] = [self.ensureIsNumber(x) for x in value]
lowerLeft = property(getLowerLeft, setLowerLeft, None, None)
lowerRight = property(getLowerRight, setLowerRight, None, None)
upperLeft = property(getUpperLeft, setUpperLeft, None, None)
upperRight = property(getUpperRight, setUpperRight, None, None)
def encode_pdfdocencoding(unicode_string):
retval = ''
for c in unicode_string:
try:
retval += chr(_pdfDocEncoding_rev[c])
except KeyError:
raise UnicodeEncodeError("pdfdocencoding", c, -1, -1,
"does not exist in translation table")
return retval
def decode_pdfdocencoding(byte_array):
retval = u''
for b in byte_array:
c = _pdfDocEncoding[ord(b)]
if c == u'\u0000':
raise UnicodeDecodeError("pdfdocencoding", b, -1, -1,
"does not exist in translation table")
retval += c
return retval
_pdfDocEncoding = (
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u02d8', u'\u02c7', u'\u02c6', u'\u02d9', u'\u02dd', u'\u02db', u'\u02da', u'\u02dc',
u'\u0020', u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026', u'\u0027',
u'\u0028', u'\u0029', u'\u002a', u'\u002b', u'\u002c', u'\u002d', u'\u002e', u'\u002f',
u'\u0030', u'\u0031', u'\u0032', u'\u0033', u'\u0034', u'\u0035', u'\u0036', u'\u0037',
u'\u0038', u'\u0039', u'\u003a', u'\u003b', u'\u003c', u'\u003d', u'\u003e', u'\u003f',
u'\u0040', u'\u0041', u'\u0042', u'\u0043', u'\u0044', u'\u0045', u'\u0046', u'\u0047',
u'\u0048', u'\u0049', u'\u004a', u'\u004b', u'\u004c', u'\u004d', u'\u004e', u'\u004f',
u'\u0050', u'\u0051', u'\u0052', u'\u0053', u'\u0054', u'\u0055', u'\u0056', u'\u0057',
u'\u0058', u'\u0059', u'\u005a', u'\u005b', u'\u005c', u'\u005d', u'\u005e', u'\u005f',
u'\u0060', u'\u0061', u'\u0062', u'\u0063', u'\u0064', u'\u0065', u'\u0066', u'\u0067',
u'\u0068', u'\u0069', u'\u006a', u'\u006b', u'\u006c', u'\u006d', u'\u006e', u'\u006f',
u'\u0070', u'\u0071', u'\u0072', u'\u0073', u'\u0074', u'\u0075', u'\u0076', u'\u0077',
u'\u0078', u'\u0079', u'\u007a', u'\u007b', u'\u007c', u'\u007d', u'\u007e', u'\u0000',
u'\u2022', u'\u2020', u'\u2021', u'\u2026', u'\u2014', u'\u2013', u'\u0192', u'\u2044',
u'\u2039', u'\u203a', u'\u2212', u'\u2030', u'\u201e', u'\u201c', u'\u201d', u'\u2018',
u'\u2019', u'\u201a', u'\u2122', u'\ufb01', u'\ufb02', u'\u0141', u'\u0152', u'\u0160',
u'\u0178', u'\u017d', u'\u0131', u'\u0142', u'\u0153', u'\u0161', u'\u017e', u'\u0000',
u'\u20ac', u'\u00a1', u'\u00a2', u'\u00a3', u'\u00a4', u'\u00a5', u'\u00a6', u'\u00a7',
u'\u00a8', u'\u00a9', u'\u00aa', u'\u00ab', u'\u00ac', u'\u0000', u'\u00ae', u'\u00af',
u'\u00b0', u'\u00b1', u'\u00b2', u'\u00b3', u'\u00b4', u'\u00b5', u'\u00b6', u'\u00b7',
u'\u00b8', u'\u00b9', u'\u00ba', u'\u00bb', u'\u00bc', u'\u00bd', u'\u00be', u'\u00bf',
u'\u00c0', u'\u00c1', u'\u00c2', u'\u00c3', u'\u00c4', u'\u00c5', u'\u00c6', u'\u00c7',
u'\u00c8', u'\u00c9', u'\u00ca', u'\u00cb', u'\u00cc', u'\u00cd', u'\u00ce', u'\u00cf',
u'\u00d0', u'\u00d1', u'\u00d2', u'\u00d3', u'\u00d4', u'\u00d5', u'\u00d6', u'\u00d7',
u'\u00d8', u'\u00d9', u'\u00da', u'\u00db', u'\u00dc', u'\u00dd', u'\u00de', u'\u00df',
u'\u00e0', u'\u00e1', u'\u00e2', u'\u00e3', u'\u00e4', u'\u00e5', u'\u00e6', u'\u00e7',
u'\u00e8', u'\u00e9', u'\u00ea', u'\u00eb', u'\u00ec', u'\u00ed', u'\u00ee', u'\u00ef',
u'\u00f0', u'\u00f1', u'\u00f2', u'\u00f3', u'\u00f4', u'\u00f5', u'\u00f6', u'\u00f7',
u'\u00f8', u'\u00f9', u'\u00fa', u'\u00fb', u'\u00fc', u'\u00fd', u'\u00fe', u'\u00ff'
)
assert len(_pdfDocEncoding) == 256
_pdfDocEncoding_rev = {}
for i in xrange(256):
char = _pdfDocEncoding[i]
if char == u"\u0000":
continue
assert char not in _pdfDocEncoding_rev
_pdfDocEncoding_rev[char] = i
| 3dfxsoftware/cbss-addons | openerp_print/pyPdf/generic.py | Python | gpl-2.0 | 28,159 |
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
""" Build the default treeprocessors for Markdown. """
treeprocessors = odict.OrderedDict()
treeprocessors["inline"] = InlineProcessor(md_instance)
treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
return treeprocessors
def isString(s):
""" Check if it's string """
if not isinstance(s, util.AtomicString):
return isinstance(s, util.string_type)
return False
class Treeprocessor(util.Processor):
"""
Treeprocessors are run on the ElementTree object before serialization.
Each Treeprocessor implements a "run" method that takes a pointer to an
ElementTree, modifies it as necessary and returns an ElementTree
object.
Treeprocessors must extend markdown.Treeprocessor.
"""
def run(self, root):
"""
Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None.
"""
pass
class InlineProcessor(Treeprocessor):
"""
A Treeprocessor that traverses a tree, applying inline patterns.
"""
def __init__(self, md):
self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
self.__placeholder_suffix = util.ETX
self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ len(self.__placeholder_suffix)
self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
self.markdown = md
def __makePlaceholder(self, type):
""" Generate a placeholder """
id = "%04d" % len(self.stashed_nodes)
hash = util.INLINE_PLACEHOLDER % id
return hash, id
def __findPlaceholder(self, data, index):
"""
Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
"""
m = self.__placeholder_re.search(data, index)
if m:
return m.group(1), m.end()
else:
return None, index + 1
def __stashNode(self, node, type):
""" Add node to stash """
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder
def __handleInline(self, data, patternIndex=0):
"""
Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders.
"""
if not isinstance(data, util.AtomicString):
startIndex = 0
while patternIndex < len(self.markdown.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
self.markdown.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
return data
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
        Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if not isText and node is not subnode:
pos = list(node).index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
def __processPlaceholders(self, data, parent):
"""
Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
"""
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
else:
if parent.text:
parent.text += text
else:
parent.text = text
result = []
        startIndex = 0
while data:
            index = data.find(self.__placeholder_prefix, startIndex)
if index != -1:
id, phEndIndex = self.__findPlaceholder(data, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
                        text = data[startIndex:index]
linkText(text)
if not isString(node): # it's Element
for child in [node] + list(node):
if child.tail:
if child.tail.strip():
                                    self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else: # it's just a string
linkText(node)
                        startIndex = phEndIndex
                        continue
                    startIndex = phEndIndex
result.append(node)
else: # wrong placeholder
end = index + len(self.__placeholder_prefix)
                    linkText(data[startIndex:end])
                    startIndex = end
else:
                text = data[startIndex:]
if isinstance(data, util.AtomicString):
                    # We don't want to lose the AtomicString
text = util.AtomicString(text)
linkText(text)
data = ""
return result
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
"""
Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start searching
Returns: String with placeholders instead of ElementTree elements.
"""
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if not match:
return data, False, 0
node = pattern.handleMatch(match)
if node is None:
return data, True, len(leftData)+match.span(len(match.groups()))[0]
if not isString(node):
if not isinstance(node.text, util.AtomicString):
# We need to process current node too
for child in [node] + list(node):
if not isString(node):
if child.text:
child.text = self.__handleInline(child.text,
patternIndex + 1)
if child.tail:
child.tail = self.__handleInline(child.tail,
patternIndex)
placeholder = self.__stashNode(node, pattern.type())
return "%s%s%s%s" % (leftData,
match.group(1),
placeholder, match.groups()[-1]), True, 0
def run(self, tree):
"""Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
        want to process your data with inline patterns, use the AtomicString
        subclass instead of a normal string:
node.text = markdown.AtomicString("This will not be processed.")
Arguments:
* tree: ElementTree object, representing Markdown tree.
Returns: ElementTree object with applied inline patterns.
"""
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement:
if child.text and not isinstance(child.text, util.AtomicString):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(
text), child)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
                    dummy = util.etree.Element('d')
                    tailResult = self.__processPlaceholders(tail, dummy)
                    if dummy.text:
                        child.tail = dummy.text
else:
child.tail = None
pos = list(currElement).index(child) + 1
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if list(child):
stack.append(child)
for element, lst in insertQueue:
if self.markdown.enable_attributes:
if element.text and isString(element.text):
element.text = \
inlinepatterns.handleAttributes(element.text,
element)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
# Processing attributes
if newChild.tail and isString(newChild.tail):
newChild.tail = \
inlinepatterns.handleAttributes(newChild.tail,
element)
if newChild.text and isString(newChild.text):
newChild.text = \
inlinepatterns.handleAttributes(newChild.text,
newChild)
element.insert(i, newChild)
i += 1
return tree
class PrettifyTreeprocessor(Treeprocessor):
""" Add linebreaks to the html document. """
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = "\n"
if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) \
and len(elem) and util.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if util.isBlockLevel(e.tag):
self._prettifyETree(e)
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
def run(self, root):
""" Add linebreaks to ElementTree root object. """
self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
# inline content and missed by _prettifyETree.
brs = root.iter('br')
for br in brs:
if not br.tail or not br.tail.strip():
br.tail = '\n'
else:
br.tail = '\n%s' % br.tail
# Clean up extra empty lines at end of code blocks.
pres = root.iter('pre')
for pre in pres:
if len(pre) and pre[0].tag == 'code':
pre[0].text = pre[0].text.rstrip() + '\n'
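
# Illustrative note (not part of the original module): these treeprocessors run
# as one stage of the Markdown pipeline, so their effect is easiest to see
# through the public API, e.g.
#
#     import markdown
#     markdown.markdown("*hello* world")   # -> '<p><em>hello</em> world</p>'
#
# where the InlineProcessor defined above is what expands ``*hello*`` into the
# ``<em>`` element.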
| nwjs/chromium.src | third_party/markdown/treeprocessors.py | Python | bsd-3-clause | 14,596 |
import logging
import pytest
from django.test import override_settings
from osmaxx.core.templatetags.navigation import siteabsoluteurl, logger
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_adds_scheme_and_netloc_and_path_prefix(rf, log_warning_mock):
relative_url = 'foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(relative_url, request) == 'http://testserver/another/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_adds_scheme_and_netloc(rf, log_warning_mock):
netloc_relative_url = '/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(netloc_relative_url, request) == 'http://testserver/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_adds_scheme(rf, log_warning_mock):
scheme_relative_url = '//example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(scheme_relative_url, request) == 'http://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_returns_absolute_http_urls_unchanged(rf, log_warning_mock):
absolute_url = 'http://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'http://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_returns_absolute_https_urls_unchanged(rf, log_warning_mock):
absolute_url = 'https://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'https://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
)
)
def test_siteabsoluteurl_without_secured_proxy_returns_absolute_nonhttp_urls_unchanged(rf, log_warning_mock):
absolute_url = 'ftp://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'ftp://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_adds_https_and_netloc_and_path_prefix(rf, log_warning_mock):
relative_url = 'foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(relative_url, request) == 'https://testserver/another/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_adds_https_and_netloc(rf, log_warning_mock):
netloc_relative_url = '/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(netloc_relative_url, request) == 'https://testserver/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_adds_https(rf, log_warning_mock):
scheme_relative_url = '//example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(scheme_relative_url, request) == 'https://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_returns_absolute_http_urls_converted_to_https(rf, log_warning_mock):
absolute_url = 'http://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'https://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_returns_absolute_https_urls_unchanged(rf, log_warning_mock):
absolute_url = 'https://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'https://example.com/foo/bar'
log_warning_mock.assert_not_called()
@override_settings(
OSMAXX=dict(
SECURED_PROXY=True,
)
)
def test_siteabsoluteurl_when_secured_proxy_in_use_returns_absolute_nonhttp_urls_unchanged(rf, log_warning_mock):
absolute_url = 'ftp://example.com/foo/bar'
request = rf.get('/another/path')
assert siteabsoluteurl(absolute_url, request) == 'ftp://example.com/foo/bar'
log_warning_mock.assert_called_with(
"ftp://example.com/foo/bar has not been converted to HTTPS, because it isn't an HTTP URL.")
@pytest.yield_fixture
def log_warning_mock(mocker):
original_level = logger.level
logger.setLevel(logging.WARNING)
yield mocker.patch.object(logger, 'warning')
logger.setLevel(original_level)
| geometalab/osmaxx | tests/core/templatetags/test_navigation.py | Python | mit | 4,931 |
# Copyright 2008 Peter Bulychev
# http://clonedigger.sourceforge.net
#
# This file is part of Clone Digger.
#
# Clone Digger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clone Digger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Clone Digger. If not, see <http://www.gnu.org/licenses/>.
import os
import xml.parsers.expat
from abstract_syntax_tree import *
class JsANTLRSourceFile (SourceFile):
extension = 'js'
size_threshold = 5
distance_threshold = 5
def __init__(self, file_name):
SourceFile.__init__(self, file_name)
class ExpatHandler:
def __init__(self, start_node, parent):
self.parent = parent
self.stack = [start_node]
def start_element(expat_self, xml_node_name, attrs):
line_number = int(attrs["line_number"])-1
line_numbers = [line_number]
if line_numbers == [-1]:
line_numbers = []
name = attrs["name"]
r = AbstractSyntaxTree(name, line_numbers, self)
if xml_node_name == "statement_node":
#if name in ["CALL", "BLOCK"]:
r.markAsStatement()
else:
assert(xml_node_name == "node")
expat_self.stack[-1].addChild(r)
expat_self.stack.append(r)
def end_element(self, name):
self.stack.pop()
tree_file_name = 'temporary_ast.xml'
producer_class_path = os.path.join('.','js_antlr', 'TreeProducer.jar')
antlr_class_path = os.path.join('.','antlr_runtime', 'antlr-3.1.1.jar')
if os.name in ['mac', 'posix']:
            class_path_delimiter = ':'
elif os.name in ['nt', 'dos', 'ce']:
            class_path_delimiter = ';'
else:
print 'unsupported OS'
assert(0)
        if os.system('java -classpath ' + producer_class_path + class_path_delimiter + antlr_class_path + ' TreeProducer %s %s 2>err.log'%(file_name, tree_file_name)):
f = open('err.log')
s = f.read()
f.close()
raise Exception(s)
f = open('err.log')
s = f.read()
f.close()
if s:
print s
self._tree = AbstractSyntaxTree('program')
handler = ExpatHandler(self._tree, self)
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = handler.start_element
p.EndElementHandler = handler.end_element
f = open(tree_file_name)
p.ParseFile(f)
f.close()
# os.remove(tree_file_name)
| h2oloopan/easymerge | EasyMerge/clonedigger/js_antlr.py | Python | mit | 3,119 |
# -*- coding: utf-8 -*-
import unittest
from coding_challenge import find_related_tags
stream1 = [
'system.load.1|1|host:a,role:web,region:us-east-1a',
'system.load.15|1|host:b,role:web,region:us-east-1b',
'system.cpu.user|20|host:a,role:web,region:us-east-1a',
'postgresql.locks|12|host:c,role:db,db_role:master,region:us-east-1e',
'postgresql.db.count|2|host:d,role:db,db_role:replica,region:us-east-1a',
'kafka.consumer.lag|20000|host:e,role:intake,region:us-east-1a',
'kafka.consumer.offset|3000000|host:e,role:intake,region:us-east-1a',
'kafka.broker.offset|25000|host:f,role:kafka,region:us-east-1a'
]
stream2 = [
"cpu.load|0.5|role:web,env:stag,region:us",
"cpu.load|0.6|role:app,env:prod,region:us",
"cpu.load|0.7|role:web,env:prod,region:eu",
]
class TestCodingChallenge(unittest.TestCase):
def test_find_related_tags(self):
tests = [
(stream2, ["role:web", "env:prod"], ["region:eu"]),
(stream1, ["role:web"], ["host:a", "region:us-east-1a", "host:b", "region:us-east-1b"]),
(stream1, ["role:db", "db_role:master"], ["host:c", "region:us-east-1e"]),
(stream1, ["host:a", "role:web"], ["region:us-east-1a"]),
(stream2, ["role:web", "region:us"], ["env:stag"]),
(stream2, ["role:web", "env:stag", "region:us"], []),
]
for test in tests:
actual = find_related_tags(test[0], test[1])
expected = test[2]
self.assertEqual(actual, expected,
'failed test={} with actual={}'
.format(test, actual))
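
# Hypothetical reference sketch (not part of the original module): one way
# `find_related_tags` could behave so that the cases above pass -- collect, in
# order of first appearance, every tag that co-occurs with all of the query
# tags on a "metric|value|tag1,tag2,..." line, excluding the query tags.
def _find_related_tags_sketch(stream, query_tags):
    query = set(query_tags)
    related = []
    for line in stream:
        tags = line.split('|')[2].split(',')
        if query.issubset(tags):
            for tag in tags:
                if tag not in query and tag not in related:
                    related.append(tag)
    return related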
| topliceanu/learn | interview/datadog/test_coding_challenge.py | Python | mit | 1,633 |
from tokens.andd import And
from tokens.expression import Expression
from tokens.iff import Iff
from tokens.kfalse import ConstantFalse
from tokens.ktrue import ConstantTrue
from tokens.nop import Not
from tokens.orr import Or
from tokens.then import Then
from tokens.variable import Variable
class TokenParser:
"""This parser only works with atomic expressions,
so parenthesis are needed everywhere to group items"""
@staticmethod
def parse_expression(string):
# Separate parenthesis so they're new tokens
# Also convert [ or { to the same parenthesis (
for s in '([{':
string = string.replace(s, ' ( ')
for s in ')]}':
string = string.replace(s, ' ) ')
# Get all operators so we can iterate over them
#
# Note that the order here is important. We first need to replace long
# expressions, such as '<->' with their single character representations.
#
# If we didn't do this, after we tried to separate the tokens from other
# expressions by adding spaces on both sides of the operator, '->' would
# break '<->' turning it into '< ->', which would not be recognised.
#
# We add spaces between the tokens so it's easy to split them and identify them.
# Another way would be to iterate over the string and finding the tokens. Once
# identified, they'd be put, in order, on a different list. However, this is
# not as simple as the currently used approach.
operators = [Iff, Then, Not, Or, And, ConstantTrue, ConstantFalse]
# Find all the representations on the string and add surrounding spaces,
# this will allow us to call 'string.split()' to separate variable names
# from the operators so the user doesn't need to enter them separated
for operator in operators:
for representation in operator.representations:
string = string.replace(representation, ' '+operator.single_char_representation+' ')
# Get all the tokens
words = string.split()
# Store the found nested expressions on the stack
expressions_stack = [Expression()]
for w in words:
done = False
for operator in operators:
# We replaced all the operator with their single character representations. We
# don't need to check whether the current word (representation) is any of the
# available representations for this operator, since it's the single-character one.
if w == operator.single_char_representation:
expressions_stack[-1].add_token(operator())
done = True
break
if done:
pass
elif w == '(':
expressions_stack.append(Expression())
elif w == ')':
e = expressions_stack.pop()
expressions_stack[-1].add_token(e)
else:
expressions_stack[-1].add_token(Variable(w))
# Tokenize the top expression (this will also tokenize its children)
expressions_stack[0].tokenize()
# Return the top expression once it's completely valid
return expressions_stack[0]
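
# Illustrative usage sketch, not part of the original module. It assumes that
# '->' and 'and' are among the textual representations defined by the Then and
# And token classes (the exact spellings live in those classes and may differ),
# and it groups every sub-expression in parenthesis, as the class docstring asks.
if __name__ == '__main__':
    expression = TokenParser.parse_expression('((p -> q) and p)')
    print(expression)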
| LonamiWebs/Py-Utils | logicmind/token_parser.py | Python | mit | 3,318 |
import os
import functools
import pytest
import lammps
# Redefine Lammps command-line args so no annoying logs or stdout
Lammps = functools.partial(lammps.Lammps, args=[
'-log', 'none',
'-screen', 'none'
])
# Lammps command line arguments
def test_lammps_init_default_units():
lmp = Lammps()
assert lmp.units == 'lj'
def test_lammps_init_set_get_units():
lmp = Lammps(units='metal')
assert lmp.units == 'metal'
def test_lammps_init_invalid_units():
with pytest.raises(ValueError):
Lammps(units='invalid_units')
# MPI_Comm comm (don't want to do mpi tests)
def test_lammps_init_invalid_comm():
with pytest.raises(TypeError):
Lammps(comm='invalid_comm')
# Style
def test_lammps_init_default_style():
lmp = Lammps()
assert lmp.system.style == 'atomic'
def test_lammps_init_set_get_style():
lmp = Lammps(style='full')
assert lmp.system.style == 'full'
def test_lammps_init_invalid_style():
with pytest.raises(ValueError):
Lammps(style='invalid_style')
# version
def test_lammps_version():
lmp = Lammps()
assert isinstance(lmp.__version__, str)
# command
def test_lammps_command():
lmp = Lammps()
lmp.command('timestep 2.0')
assert lmp.dt == 2.0
# file
def test_lammps_file(tmpdir):
tmpfile = tmpdir.join("test_file.in")
tmpfile.write("timestep 1.0\n")
lmp = Lammps()
lmp.file(str(tmpfile))
assert lmp.dt == 1.0
def test_lammps_file_twice(tmpdir):
tmpfile1 = tmpdir.join("test_file1.in")
tmpfile1.write("timestep 1.0\n")
tmpfile2 = tmpdir.join("test_file2.in")
tmpfile2.write("timestep 2.0\n")
lmp = Lammps()
lmp.file(str(tmpfile1))
assert lmp.dt == 1.0
lmp.file(str(tmpfile2))
assert lmp.dt == 2.0
# Run
def test_lammps_run():
# This tests has a dependency of the
# LAMMPS example melt
# dt tested
# time step tested
# time tested
# This is hardly a unit test... (a better way?)
lmp = Lammps()
lmp.file(os.path.join(lammps.__path__[0], 'data', 'melt.in'))
assert lmp.dt == 0.005
assert lmp.time_step == 100
assert lmp.time == lmp.time_step * lmp.dt
# time_step
def test_lammps_default_time_step():
lmp = Lammps()
assert lmp.time_step == 0
def test_lammps_set_get_time_step():
lmp = Lammps()
lmp.time_step = 100
assert lmp.time_step == 100
# dt
def test_lammps_default_dt():
lmp = Lammps()
assert lmp.dt == 0.005
def test_lammps_set_get_dt():
lmp = Lammps()
lmp.dt = 13.0
assert lmp.dt == 13.0
# time
def test_lammps_default_time():
lmp = Lammps()
assert lmp.time == 0.0
# reset
def test_lammps_reset():
lmp = Lammps()
lmp.dt = 13.0
lmp.reset()
assert lmp.dt == 0.005
if __name__ == '__main__':
pytest.main()
| costrouc/lammps-python | lammps/test/test_lammps.py | Python | gpl-3.0 | 2,816 |
# -*- coding: utf-8 -*-
from boxbranding import getBoxType
import struct, socket, fcntl, sys, os, time
from sys import modules
import os
import time
def getVersionString():
return getImageVersionString()
def getImageVersionString():
try:
if os.path.isfile('/var/lib/opkg/status'):
st = os.stat('/var/lib/opkg/status')
else:
st = os.stat('/usr/lib/ipkg/status')
tm = time.localtime(st.st_mtime)
if tm.tm_year >= 2011:
return time.strftime("%Y-%m-%d %H:%M:%S", tm)
except:
pass
return _("unavailable")
def getFlashDateString():
try:
return time.strftime(_("%Y-%m-%d %H:%M"), time.localtime(os.stat("/boot").st_ctime))
except:
return _("unknown")
def getEnigmaVersionString():
import enigma
enigma_version = enigma.getEnigmaVersionString()
if '-(no branch)' in enigma_version:
enigma_version = enigma_version [:-12]
return enigma_version
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
f = open("/proc/version","r")
kernelversion = f.read().split(' ', 4)[2].split('-',2)[0]
f.close()
return kernelversion
except:
return _("unknown")
def getDriverInstalledDate():
try:
from glob import glob
driver = [x.split("-")[-2:-1][0][-8:] for x in open(glob("/var/lib/opkg/info/*-dvb-modules-*.control")[0], "r") if x.startswith("Version:")][0]
return "%s-%s-%s" % (driver[:4], driver[4:6], driver[6:])
except:
return _("unknown")
def getChipSetString():
if getBoxType() in ('dm7080','dm820'):
return "7435"
else:
try:
f = open('/proc/stb/info/chipset', 'r')
chipset = f.read()
f.close()
return str(chipset.lower().replace('\n','').replace('bcm','').replace('brcm','').replace('sti',''))
except:
return _("unavailable")
def getModelString():
try:
file = open("/proc/stb/info/boxtype", "r")
model = file.readline().strip()
file.close()
return model
except IOError:
return _("unknown")
def getPythonVersionString():
try:
import commands
status, output = commands.getstatusoutput("python -V")
return output.split(' ')[1]
except:
return _("unknown")
def getCPUString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("system type"):
system = splitted[1].split(' ')[0]
elif splitted[0].startswith("Processor"):
system = splitted[1].split(' ')[0]
file.close()
return system
except IOError:
return _("unavailable")
def getCPUSpeedString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("cpu MHz"):
speed = splitted[1].split('.')[0]
file.close()
return speed
except IOError:
return _("unavailable")
def getCpuCoresString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("processor"):
if int(splitted[1]) > 0:
cores = 2
else:
cores = 1
file.close()
return cores
except IOError:
return _("unavailable")
def getCPUTempString():
try:
if os.path.isfile('/proc/stb/fp/temp_sensor_avs'):
temperature = open("/proc/stb/fp/temp_sensor_avs").readline().replace('\n','')
return _("%s°C") % temperature
except:
pass
return ""
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr == 0x8927:
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1].upper()
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = 0x8915 # SIOCGIFADDR
infos['brdaddr'] = 0x8919 # SIOCGIFBRDADDR
infos['hwaddr'] = 0x8927 # SIOCSIFHWADDR
infos['netmask'] = 0x891b # SIOCGIFNETMASK
try:
print "in TRYYYYYYY", ifname
for k,v in infos.items():
print infos.items()
ifreq[k] = _ifinfo(sock, v, ifname)
except:
print "IN EXCEEEEEEEEPT", ifname
pass
sock.close()
return ifreq
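
# Illustrative note (not part of the original module): on a Linux receiver with
# a configured 'eth0' interface, getIfConfig('eth0') returns something like
# {'ifname': 'eth0', 'addr': '192.168.1.10', 'netmask': '255.255.255.0',
#  'brdaddr': '192.168.1.255', 'hwaddr': '00:11:22:33:44:55'}; keys whose
# ioctl lookup fails are simply left out of the result.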
def getIfTransferredData(ifname):
f = open('/proc/net/dev', 'r')
for line in f:
if ifname in line:
data = line.split('%s:' % ifname)[1].split()
rx_bytes, tx_bytes = (data[0], data[8])
f.close()
return rx_bytes, tx_bytes
def getHardwareTypeString():
try:
if os.path.isfile("/proc/stb/info/boxtype"):
return open("/proc/stb/info/boxtype").read().strip().upper()
if os.path.isfile("/proc/stb/info/azmodel"):
return "AZBOX " + open("/proc/stb/info/azmodel").read().strip().upper() + "(" + open("/proc/stb/info/version").read().strip().upper() + ")"
if os.path.isfile("/proc/stb/info/vumodel"):
return "VU+" + open("/proc/stb/info/vumodel").read().strip().upper() + "(" + open("/proc/stb/info/version").read().strip().upper() + ")"
if os.path.isfile("/proc/stb/info/model"):
return open("/proc/stb/info/model").read().strip().upper()
except:
pass
return _("unavailable")
def getImageTypeString():
try:
return open("/etc/issue").readlines()[-2].capitalize().strip()[:-6]
except:
pass
return _("undefined")
# For modules that do "from About import about"
about = modules[__name__]
| vitmod/dvbapp | lib/python/Components/About.py | Python | gpl-2.0 | 5,538 |
import os
from os.path import exists
import pytest
from pip._internal.locations import write_delete_marker_file
from pip._internal.status_codes import PREVIOUS_BUILD_DIR_ERROR
from tests.lib import need_mercurial
from tests.lib.local_repos import local_checkout
def test_cleanup_after_install(script, data):
"""
Test clean up after installing a package.
"""
script.pip(
'install', '--no-index', '--find-links=%s' % data.find_links, 'simple'
)
build = script.venv_path / "build"
src = script.venv_path / "src"
assert not exists(build), "build/ dir still exists: %s" % build
assert not exists(src), "unexpected src/ dir exists: %s" % src
script.assert_no_temp()
@pytest.mark.network
def test_no_clean_option_blocks_cleaning_after_install(script, data):
"""
Test --no-clean option blocks cleaning after install
"""
build = script.base_path / 'pip-build'
script.pip(
'install', '--no-clean', '--no-index', '--build', build,
'--find-links=%s' % data.find_links, 'simple', expect_temp=True,
)
assert exists(build)
@pytest.mark.network
@need_mercurial
def test_cleanup_after_install_editable_from_hg(script, tmpdir):
"""
Test clean up after cloning from Mercurial.
"""
script.pip(
'install',
'-e',
'%s#egg=ScriptTest' %
local_checkout(
'hg+https://bitbucket.org/ianb/scripttest',
tmpdir.join("cache"),
),
expect_error=True,
)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "build/ dir still exists: %s" % build
assert exists(src), "expected src/ dir doesn't exist: %s" % src
script.assert_no_temp()
def test_cleanup_after_install_from_local_directory(script, data):
"""
Test clean up after installing from a local directory.
"""
to_install = data.packages.join("FSPkg")
script.pip('install', to_install, expect_error=False)
build = script.venv_path / 'build'
src = script.venv_path / 'src'
assert not exists(build), "unexpected build/ dir exists: %s" % build
assert not exists(src), "unexpected src/ dir exist: %s" % src
script.assert_no_temp()
def test_cleanup_req_satisfied_no_name(script, data):
"""
Test cleanup when req is already satisfied, and req has no 'name'
"""
# this test confirms Issue #420 is fixed
# reqs with no 'name' that were already satisfied were leaving behind tmp
# build dirs
# 2 examples of reqs that would do this
# 1) https://bitbucket.org/ianb/initools/get/tip.zip
# 2) parent-0.1.tar.gz
dist = data.packages.join("parent-0.1.tar.gz")
script.pip('install', dist)
script.pip('install', dist)
build = script.venv_path / 'build'
assert not exists(build), "unexpected build/ dir exists: %s" % build
script.assert_no_temp()
def test_cleanup_after_install_exception(script, data):
"""
Test clean up after a 'setup.py install' exception.
"""
# broken==0.2broken fails during install; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.2broken',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
def test_cleanup_after_egg_info_exception(script, data):
"""
Test clean up after a 'setup.py egg_info' exception.
"""
# brokenegginfo fails during egg_info; see packages readme file
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'brokenegginfo==0.1',
expect_error=True,
)
build = script.venv_path / 'build'
assert not exists(build), "build/ dir still exists: %s" % result.stdout
script.assert_no_temp()
@pytest.mark.network
def test_cleanup_prevented_upon_build_dir_exception(script, data):
"""
Test no cleanup occurs after a PreviousBuildDirError
"""
build = script.venv_path / 'build'
build_simple = build / 'simple'
os.makedirs(build_simple)
write_delete_marker_file(build)
build_simple.join("setup.py").write("#")
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'simple',
'--build', build,
expect_error=True, expect_temp=True,
)
assert result.returncode == PREVIOUS_BUILD_DIR_ERROR
assert "pip can't proceed" in result.stderr
assert exists(build_simple)
| zvezdan/pip | tests/functional/test_install_cleanup.py | Python | mit | 4,518 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright (C) 2013 Yaacov Zamir <[email protected]>
# Author: Yaacov Zamir (2013)
from Tkinter import *
import ttk
# try to load pyYaml
# if the module is missing, we can use a dict
# instead
try:
import yaml
except:
pass
class TtkYaml:
''' a class to build simple ttk frames with buttons
using yaml gui files
'''
def __init__(self):
self.root = Tk()
def load_gui(self, data):
''' load a yaml file as the gui
@param filename the file gui yaml name
or the data dict
'''
# load gui file
# if data is not a file name - use data as a gui dict
if isinstance(data, str):
f = open(data)
self.gui = yaml.safe_load(f)
f.close()
else:
self.gui = data
# alias the gui inputs as self.inputs
self.inputs = self.gui['inputs']
# set the main frame and the inputs
self.set_mainframe()
self.set_inputs()
# extra styling
self.set_style()
def run_gui(self):
''' start running the program
'''
self.root.mainloop()
def input_changed(self, *args):
''' called when an input is changed,
inherit and overload this function if you want to call
        something when an input has changed
'''
# default is to do nothing
pass
def set_style(self):
''' do extra styling
'''
parent = self.mainframe
parent['padding'] = '5 5 5 5'
for child in parent.winfo_children():
child.grid_configure(padx=5, pady=5)
def set_mainframe(self):
''' set a ttk main frame
'''
# get locale parameters
root = self.root
frame = self.gui['frame']
# create main window
if 'title' in frame.keys():
root.title(frame['title'])
if 'ico' in frame.keys():
try:
root.iconbitmap(default=frame['ico'])
except:
pass
# set the main frame
self.mainframe = ttk.Frame(root)
self.mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
def set_inputs(self):
''' set the ttk inputs and buttons
'''
# get locale parameters
parent = self.mainframe
inputs = self.inputs
call_back = self.input_changed
# create all the inputs / buttons
# and position them in the frame
# TODO: create a separate function call for the creation
# of each widget
for reg, button in inputs.items():
# set a var for this input
var = StringVar()
button['var'] = var
# checkbox
# --------
if button['type'] == 'checkbox':
# create the widget
_check = ttk.Checkbutton(parent, text=button['text'],
variable=var,
command=call_back,
onvalue='1.0', offvalue='0.0')
button['checkbox'] = _check
_check.grid(column=button['c'], row=button['r'], sticky=(W, E))
# radiobox
# --------
elif button['type'] == 'radio':
# create a label
_label = ttk.Label(parent, text=button['text'])
_label.grid(column=button['c'], row=button['r'], sticky=W)
# FIXME: what to put in the on 'radio' value?
button['radio'] = _label
# create the widgets
i = 0
for key, text in button['options'].items():
_radio = ttk.Radiobutton(parent,
command=call_back, text=text,
variable=var, value=key)
_radio.grid(column=(button['c'] + 1), row=(button['r'] + i), sticky=(W, E))
i += 1
# entry
# -----
elif button['type'] == 'entry':
                # create a label
_label = ttk.Label(parent, text=button['text'])
_label.grid(column=button['c'], row=button['r'], sticky=W)
# create the widget
_entry = ttk.Entry(parent, width=18,
textvariable=var)
button['entry'] = _entry
# check state
if 'state' in button.keys():
if button['state'] == 'DISABLED':
_entry['state'] = DISABLED
else:
_entry['state'] = button['state']
# bind change to call back function
_entry.bind('<Return>', call_back)
_entry.grid(column=(button['c'] + 1), row=button['r'], sticky=(W, E))
# button
# -----
elif button['type'] == 'button':
# create the widget
_button = ttk.Button(parent, textvariable=var, command=call_back)
button['button'] = _button
# set text
var.set(button['text'])
# add to parent
_button.grid(column=button['c'], row=button['r'], sticky=W)
# label
# -----
elif button['type'] == 'label':
# create the widget
_label = ttk.Label(parent, textvariable=var)
button['label'] = _label
# set style
if 'foreground' in button.keys():
_style = ttk.Style()
_style.configure('%s.TLabel' % button['foreground'], foreground=button['foreground'])
_label.configure(style='%s.TLabel' % button['foreground'])
# set text
var.set(button['text'])
# add to parent
_label.grid(column=button['c'], row=button['r'], sticky=W)
else:
pass
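
# Hypothetical usage sketch, not part of the original module: it builds a tiny
# window from a plain dict instead of a YAML file (so pyYaml is not required)
# and assumes a working Tk installation. The keys shown ('frame', 'inputs',
# 'type', 'text', 'c', 'r') follow the structure that set_inputs() above reads.
if __name__ == '__main__':
    class Demo(TtkYaml):
        def input_changed(self, *args):
            print 'changed:', [(key, inp['var'].get())
                               for key, inp in self.inputs.items()]

    demo = Demo()
    demo.load_gui({
        'frame': {'title': 'TtkYaml demo'},
        'inputs': {
            'name': {'type': 'entry', 'text': 'Name', 'c': 0, 'r': 0},
            'ok': {'type': 'button', 'text': 'OK', 'c': 0, 'r': 1},
        },
    })
    demo.run_gui()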
| yaacov/ttkyaml | src/ttkyaml/ttkyaml.py | Python | gpl-2.0 | 7,272 |
from fontTools.misc.arrayTools import pairwise
from fontTools.pens.filterPen import ContourFilterPen
__all__ = ["reversedContour", "ReverseContourPen"]
class ReverseContourPen(ContourFilterPen):
"""Filter pen that passes outline data to another pen, but reversing
the winding direction of all contours. Components are simply passed
through unchanged.
Closed contours are reversed in such a way that the first point remains
the first point.
"""
def filterContour(self, contour):
return reversedContour(contour)
def reversedContour(contour):
""" Generator that takes a list of pen's (operator, operands) tuples,
and yields them with the winding direction reversed.
"""
if not contour:
return # nothing to do, stop iteration
# valid contours must have at least a starting and ending command,
# can't have one without the other
assert len(contour) > 1, "invalid contour"
# the type of the last command determines if the contour is closed
contourType = contour.pop()[0]
assert contourType in ("endPath", "closePath")
closed = contourType == "closePath"
firstType, firstPts = contour.pop(0)
assert firstType in ("moveTo", "qCurveTo"), (
"invalid initial segment type: %r" % firstType)
firstOnCurve = firstPts[-1]
if firstType == "qCurveTo":
        # special case for TrueType paths containing only off-curve points
assert firstOnCurve is None, (
"off-curve only paths must end with 'None'")
assert not contour, (
"only one qCurveTo allowed per off-curve path")
firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
(None,))
if not contour:
# contour contains only one segment, nothing to reverse
if firstType == "moveTo":
closed = False # single-point paths can't be closed
else:
closed = True # off-curve paths are closed by definition
yield firstType, firstPts
else:
lastType, lastPts = contour[-1]
lastOnCurve = lastPts[-1]
if closed:
# for closed paths, we keep the starting point
yield firstType, firstPts
if firstOnCurve != lastOnCurve:
# emit an implied line between the last and first points
yield "lineTo", (lastOnCurve,)
contour[-1] = (lastType,
tuple(lastPts[:-1]) + (firstOnCurve,))
if len(contour) > 1:
secondType, secondPts = contour[0]
else:
# contour has only two points, the second and last are the same
secondType, secondPts = lastType, lastPts
# if a lineTo follows the initial moveTo, after reversing it
# will be implied by the closePath, so we don't emit one;
# unless the lineTo and moveTo overlap, in which case we keep the
# duplicate points
if secondType == "lineTo" and firstPts != secondPts:
del contour[0]
if contour:
contour[-1] = (lastType,
tuple(lastPts[:-1]) + secondPts)
else:
# for open paths, the last point will become the first
yield firstType, (lastOnCurve,)
contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
# we iterate over all segment pairs in reverse order, and yield
# each one with the off-curve points reversed (if any), and
# with the on-curve point of the following segment
for (curType, curPts), (_, nextPts) in pairwise(
contour, reverse=True):
yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
yield "closePath" if closed else "endPath", ()
| google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/pens/reverseContourPen.py | Python | apache-2.0 | 3,849 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
from . import builder_name_schema
class BuilderNameSchemaApi(recipe_api.RecipeApi):
def __init__(self, *args, **kwargs):
super(BuilderNameSchemaApi, self).__init__(*args, **kwargs)
# See builder_name_schema.py for documentation.
self.BUILDER_NAME_SCHEMA = builder_name_schema.BUILDER_NAME_SCHEMA
self.BUILDER_NAME_SEP = builder_name_schema.BUILDER_NAME_SEP
self.BUILDER_ROLE_BUILD = builder_name_schema.BUILDER_ROLE_BUILD
self.BUILDER_ROLE_HOUSEKEEPER = builder_name_schema.BUILDER_ROLE_HOUSEKEEPER
self.BUILDER_ROLE_INFRA = builder_name_schema.BUILDER_ROLE_INFRA
self.BUILDER_ROLE_PERF = builder_name_schema.BUILDER_ROLE_PERF
self.BUILDER_ROLE_TEST = builder_name_schema.BUILDER_ROLE_TEST
self.BUILDER_ROLES = builder_name_schema.BUILDER_ROLES
def MakeBuilderName(self, **kwargs):
return builder_name_schema.MakeBuilderName(**kwargs)
def DictForBuilderName(self, *args, **kwargs):
return builder_name_schema.DictForBuilderName(*args, **kwargs)
| youtube/cobalt | third_party/skia_next/third_party/skia/infra/bots/recipe_modules/builder_name_schema/api.py | Python | bsd-3-clause | 1,233 |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Main package."""
from .version import __version__
from .lang import (
build, getresource, SchemaFactory, SchemaBuilder, getschemacls,
XSDSchemaBuilder, FunctionSchema, ParamSchema
)
from .base import Schema, DynamicValue
from .registry import register, registercls, getbyuuid, getbyname
from .utils import (
validate, dump, updatecontent, ThisSchema, data2schema, AnySchema,
data2schemacls, RegisteredSchema, RefSchema
)
from .elementary import (
StringSchema, DateTimeSchema, IntegerSchema, NumberSchema, LongSchema,
ComplexSchema, FloatSchema, ArraySchema, DictSchema, TypeSchema,
EnumSchema, BooleanSchema, ElementarySchema, OneOfSchema
)
__all__ = [
'__version__', 'Schema', 'DynamicValue',
'register', 'registercls', 'getbyuuid', 'getbyname',
'StringSchema', 'IntegerSchema',
'FloatSchema', 'ComplexSchema', 'EnumSchema', 'ArraySchema',
'DictSchema', 'DateTimeSchema', 'NumberSchema', 'BooleanSchema',
'TypeSchema', 'AnySchema', 'OneOfSchema',
'data2schemacls',
'data2schema', 'validate', 'dump', 'updatecontent', 'ThisSchema',
'RefSchema', 'AnySchema', 'RegisteredSchema',
'ElementarySchema', 'LongSchema',
'SchemaFactory', 'getschemacls', 'getresource', 'SchemaBuilder', 'build',
'XSDSchemaBuilder', 'FunctionSchema', 'ParamSchema'
]
| b3j0f/schema | b3j0f/schema/__init__.py | Python | mit | 2,642 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from selenium.common.exceptions import TimeoutException
from pages.firefox.whatsnew.whatsnew import FirefoxWhatsNewPage
@pytest.mark.skip_if_not_firefox(reason='Whatsnew pages are shown to Firefox only.')
@pytest.mark.nondestructive
def test_send_to_device_success(base_url, selenium):
page = FirefoxWhatsNewPage(selenium, base_url).open()
assert not page.is_qr_code_displayed
send_to_device = page.send_to_device
send_to_device.type_email('[email protected]')
send_to_device.click_send()
assert send_to_device.send_successful
@pytest.mark.skip_if_not_firefox(reason='Whatsnew pages are shown to Firefox only.')
@pytest.mark.nondestructive
def test_send_to_device_fails_when_missing_required_fields(base_url, selenium):
page = FirefoxWhatsNewPage(selenium, base_url).open()
with pytest.raises(TimeoutException):
page.send_to_device.click_send()
@pytest.mark.skip_if_not_firefox(reason='Whatsnew pages are shown to Firefox only.')
@pytest.mark.nondestructive
def test_firefox_rocket_send_yourself(base_url, selenium):
page = FirefoxWhatsNewPage(selenium, base_url, locale='id').open()
assert page.send_to_device.is_displayed
send_to_device = page.send_to_device
send_to_device.type_email('[email protected]')
send_to_device.click_send()
assert send_to_device.send_successful
| sgarrity/bedrock | tests/functional/firefox/whatsnew/test_whatsnew.py | Python | mpl-2.0 | 1,563 |
def install(job):
service = job.service
service.executeAction('printx', context=job.context, args={"tags":['a', 'C', 'b']})
def printx(job):
print("Executing printx in test1")
| Jumpscale/ays9 | tests/test_services/test_list_jobs/actorTemplates/test1/actions.py | Python | apache-2.0 | 189 |
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += i18n_patterns(
url(r'^diff/', include('cmsplugin_diff.urls', namespace='cmsplugin_diff')),
url(r'^', include('cms.urls')),
)
| doctormo/django-cmsplugin-diff | demo/urls.py | Python | agpl-3.0 | 539 |
# Copyright 2015, Cisco Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.drivers import base
from ironic.drivers.modules.cimc import common
imcsdk = importutils.try_import('ImcSdk')
LOG = logging.getLogger(__name__)
CIMC_TO_IRONIC_BOOT_DEVICE = {
'storage-read-write': boot_devices.DISK,
'lan-read-only': boot_devices.PXE,
'vm-read-only': boot_devices.CDROM
}
IRONIC_TO_CIMC_BOOT_DEVICE = {
boot_devices.DISK: ('lsbootStorage', 'storage-read-write',
'storage', 'read-write'),
boot_devices.PXE: ('lsbootLan', 'lan-read-only',
'lan', 'read-only'),
boot_devices.CDROM: ('lsbootVirtualMedia', 'vm-read-only',
'virtual-media', 'read-only')
}
class CIMCManagement(base.ManagementInterface):
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return common.COMMON_PROPERTIES
def validate(self, task):
"""Check if node.driver_info contains the required CIMC credentials.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if required CIMC credentials are
missing.
"""
common.parse_driver_info(task.node)
def get_supported_boot_devices(self, task):
"""Get a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(CIMC_TO_IRONIC_BOOT_DEVICE.values())
def get_boot_device(self, task):
"""Get the current boot device for a node.
Provides the current boot device of the node. Be aware that not
all drivers support this.
:param task: a task from TaskManager.
:raises: MissingParameterValue if a required parameter is missing
:raises: CIMCException if there is an error from CIMC
:returns: a dictionary containing:
:boot_device:
the boot device, one of :mod:`ironic.common.boot_devices` or
None if it is unknown.
:persistent:
Whether the boot device will persist to all future boots or
not, None if it is unknown.
"""
with common.cimc_handle(task) as handle:
method = imcsdk.ImcCore.ExternalMethod("ConfigResolveClass")
method.Cookie = handle.cookie
method.InDn = "sys/rack-unit-1"
method.InHierarchical = "true"
method.ClassId = "lsbootDef"
try:
resp = handle.xml_query(method, imcsdk.WriteXmlOption.DIRTY)
except imcsdk.ImcException as e:
raise exception.CIMCException(node=task.node.uuid, error=e)
error = getattr(resp, 'error_code', None)
if error:
raise exception.CIMCException(node=task.node.uuid, error=error)
bootDevs = resp.OutConfigs.child[0].child
first_device = None
for dev in bootDevs:
try:
if int(dev.Order) == 1:
first_device = dev
break
except (ValueError, AttributeError):
pass
boot_device = (CIMC_TO_IRONIC_BOOT_DEVICE.get(
first_device.Rn) if first_device else None)
# Every boot device in CIMC is persistent right now
persistent = True if boot_device else None
return {'boot_device': boot_device, 'persistent': persistent}
def set_boot_device(self, task, device, persistent=True):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Every boot device in CIMC is persistent right now,
so this value is ignored.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: MissingParameterValue if a required parameter is missing
:raises: CIMCException if there is an error from CIMC
"""
with common.cimc_handle(task) as handle:
dev = IRONIC_TO_CIMC_BOOT_DEVICE[device]
method = imcsdk.ImcCore.ExternalMethod("ConfigConfMo")
method.Cookie = handle.cookie
method.Dn = "sys/rack-unit-1/boot-policy"
method.InHierarchical = "true"
config = imcsdk.Imc.ConfigConfig()
bootMode = imcsdk.ImcCore.ManagedObject(dev[0])
bootMode.set_attr("access", dev[3])
bootMode.set_attr("type", dev[2])
bootMode.set_attr("Rn", dev[1])
bootMode.set_attr("order", "1")
config.add_child(bootMode)
method.InConfig = config
try:
resp = handle.xml_query(method, imcsdk.WriteXmlOption.DIRTY)
except imcsdk.ImcException as e:
raise exception.CIMCException(node=task.node.uuid, error=e)
            error = getattr(resp, 'error_code', None)
if error:
raise exception.CIMCException(node=task.node.uuid, error=error)
def get_sensors_data(self, task):
raise NotImplementedError()
| redhat-openstack/ironic | ironic/drivers/modules/cimc/management.py | Python | apache-2.0 | 6,171 |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Miscellaneous node types.
'''
import os.path
from grit.node import base
from grit.node import message
from grit import exception
from grit import constants
from grit import util
import grit.format.rc_header
class IfNode(base.Node):
'''A node for conditional inclusion of resources.
'''
def _IsValidChild(self, child):
from grit.node import empty
assert self.parent, '<if> node should never be root.'
if isinstance(self.parent, empty.IncludesNode):
from grit.node import include
return isinstance(child, include.IncludeNode)
elif isinstance(self.parent, empty.MessagesNode):
from grit.node import message
return isinstance(child, message.MessageNode)
elif isinstance(self.parent, empty.StructuresNode):
from grit.node import structure
return isinstance(child, structure.StructureNode)
else:
return False
def MandatoryAttributes(self):
return ['expr']
def IsConditionSatisfied(self):
'''Returns true if and only if the Python expression stored in attribute
'expr' evaluates to true.
'''
return self.EvaluateCondition(self.attrs['expr'])
class ReleaseNode(base.Node):
'''The <release> element.'''
def _IsValidChild(self, child):
from grit.node import empty
return isinstance(child, (empty.IncludesNode, empty.MessagesNode,
empty.StructuresNode, empty.IdentifiersNode))
def _IsValidAttribute(self, name, value):
return (
(name == 'seq' and int(value) <= self.GetRoot().GetCurrentRelease()) or
name == 'allow_pseudo'
)
def MandatoryAttributes(self):
return ['seq']
def DefaultAttributes(self):
return { 'allow_pseudo' : 'true' }
  def GetReleaseNumber(self):
    '''Returns the sequence number of this release.'''
    return self.attrs['seq']
def ItemFormatter(self, t):
if t == 'data_package':
from grit.format import data_pack
return data_pack.DataPack()
else:
return super(type(self), self).ItemFormatter(t)
class GritNode(base.Node):
'''The <grit> root element.'''
def __init__(self):
base.Node.__init__(self)
self.output_language = ''
self.defines = {}
def _IsValidChild(self, child):
from grit.node import empty
return isinstance(child, (ReleaseNode, empty.TranslationsNode,
empty.OutputsNode))
def _IsValidAttribute(self, name, value):
if name not in ['base_dir', 'source_lang_id',
'latest_public_release', 'current_release',
'enc_check', 'tc_project']:
return False
if name in ['latest_public_release', 'current_release'] and value.strip(
'0123456789') != '':
return False
return True
def MandatoryAttributes(self):
return ['latest_public_release', 'current_release']
def DefaultAttributes(self):
return {
'base_dir' : '.',
'source_lang_id' : 'en',
'enc_check' : constants.ENCODING_CHECK,
'tc_project' : 'NEED_TO_SET_tc_project_ATTRIBUTE',
}
def EndParsing(self):
base.Node.EndParsing(self)
if (int(self.attrs['latest_public_release'])
> int(self.attrs['current_release'])):
raise exception.Parsing('latest_public_release cannot have a greater '
'value than current_release')
self.ValidateUniqueIds()
# Add the encoding check if it's not present (should ensure that it's always
# present in all .grd files generated by GRIT). If it's present, assert if
# it's not correct.
if 'enc_check' not in self.attrs or self.attrs['enc_check'] == '':
self.attrs['enc_check'] = constants.ENCODING_CHECK
else:
assert self.attrs['enc_check'] == constants.ENCODING_CHECK, (
'Are you sure your .grd file is in the correct encoding (UTF-8)?')
def ValidateUniqueIds(self):
'''Validate that 'name' attribute is unique in all nodes in this tree
except for nodes that are children of <if> nodes.
'''
unique_names = {}
duplicate_names = []
for node in self:
if isinstance(node, message.PhNode):
continue # PhNode objects have a 'name' attribute which is not an ID
node_ids = node.GetTextualIds()
if node_ids:
for node_id in node_ids:
if util.SYSTEM_IDENTIFIERS.match(node_id):
continue # predefined IDs are sometimes used more than once
# Don't complain about duplicate IDs if they occur in a node that is
# inside an <if> node.
if (node_id in unique_names and node_id not in duplicate_names and
(not node.parent or not isinstance(node.parent, IfNode))):
duplicate_names.append(node_id)
unique_names[node_id] = 1
if len(duplicate_names):
raise exception.DuplicateKey(', '.join(duplicate_names))
def GetCurrentRelease(self):
'''Returns the current release number.'''
return int(self.attrs['current_release'])
def GetLatestPublicRelease(self):
'''Returns the latest public release number.'''
return int(self.attrs['latest_public_release'])
def GetSourceLanguage(self):
'''Returns the language code of the source language.'''
return self.attrs['source_lang_id']
def GetTcProject(self):
'''Returns the name of this project in the TranslationConsole, or
'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined.'''
return self.attrs['tc_project']
def SetOwnDir(self, dir):
'''Informs the 'grit' element of the directory the file it is in resides.
This allows it to calculate relative paths from the input file, which is
what we desire (rather than from the current path).
Args:
dir: r'c:\bla'
Return:
None
'''
assert dir
self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir']))
def GetBaseDir(self):
'''Returns the base directory, relative to the working directory. To get
the base directory as set in the .grd file, use GetOriginalBaseDir()
'''
if hasattr(self, 'base_dir'):
return self.base_dir
else:
return self.GetOriginalBaseDir()
def GetOriginalBaseDir(self):
'''Returns the base directory, as set in the .grd file.
'''
return self.attrs['base_dir']
def GetOutputFiles(self):
'''Returns the list of <file> nodes that are children of this node's
<outputs> child.'''
for child in self.children:
if child.name == 'outputs':
return child.children
raise exception.MissingElement()
def ItemFormatter(self, t):
if t == 'rc_header':
from grit.format import rc_header # import here to avoid circular dep
return rc_header.TopLevel()
elif t in ['rc_all', 'rc_translateable', 'rc_nontranslateable']:
from grit.format import rc # avoid circular dep
return rc.TopLevel()
elif t == 'resource_map_header':
from grit.format import resource_map
return resource_map.HeaderTopLevel()
elif t == 'resource_map_source':
from grit.format import resource_map
return resource_map.SourceTopLevel()
elif t == 'js_map_format':
from grit.format import js_map_format
return js_map_format.TopLevel()
else:
return super(type(self), self).ItemFormatter(t)
def SetOutputContext(self, output_language, defines):
self.output_language = output_language
self.defines = defines
class IdentifierNode(base.Node):
'''A node for specifying identifiers that should appear in the resource
header file, and be unique amongst all other resource identifiers, but don't
have any other attributes or reference any resources.
'''
def MandatoryAttributes(self):
return ['name']
def DefaultAttributes(self):
return { 'comment' : '', 'id' : '' }
def ItemFormatter(self, t):
if t == 'rc_header':
return grit.format.rc_header.Item()
def GetId(self):
'''Returns the id of this identifier if it has one, None otherwise
'''
if 'id' in self.attrs:
return self.attrs['id']
return None
# static method
def Construct(parent, name, id, comment):
'''Creates a new node which is a child of 'parent', with attributes set
by parameters of the same name.
'''
node = IdentifierNode()
node.StartParsing('identifier', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('id', id)
node.HandleAttribute('comment', comment)
node.EndParsing()
return node
Construct = staticmethod(Construct)
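
# Editor's sketch (not part of the original grit source): a minimal,
# hypothetical use of IdentifierNode.Construct as documented above. The
# `parent` argument is assumed to be any already-parsed grit node.
def _example_construct_identifier(parent):
  ident = IdentifierNode.Construct(parent, 'IDR_EXAMPLE', '12345',
                                   'illustrative identifier')
  return ident.GetId()  # returns '12345'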
| rwatson/chromium-capsicum | tools/grit/grit/node/misc.py | Python | bsd-3-clause | 8,674 |
# Droog
# Copyright (C) 2015 Adam Miezianko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""The creature module defines the general Creature class as well as individual
creature types."""
import logging
import random
from . import actor
from . import attack
from . import the
from . import world
from . import combat
from . import engine
LOG = logging.getLogger(__name__)
class Creature(actor.Actor):
"""The Creature class manages a creature's statistics and actions."""
def __init__(self, glyph, name, initial_vowel=False):
"""Create a creature.
The creature's initial attributes will all be two.
        glyph -- a single-character representation, for the map
name -- a string representation, for elsewhere
initial_vowel -- when true, use 'an', not 'a' in sentences
"""
assert len(glyph) == 1
self.glyph = glyph
self.name = name
self.initial_vowel = initial_vowel
self._strength = 2
self._dexterity = 2
self._constitution = 2
self.is_dead = False
self.is_hero = False
self.loc = (None, None)
self.is_stunned = False
self.is_diseased = False
self.is_wounded = False
self.inventory = []
self.score_value = 0
self.end_reason = "befallen by an unspecified disaster"
super(Creature, self).__init__()
def act(self):
"""Clear any conditions that clear at the start of the creature's turn.
Currently that is stunned."""
self.is_stunned = False
def __repr__(self):
"""A string representation of the creature."""
return self.name
@property
def strength(self):
"""The creature's strength."""
return self._strength
@strength.setter
def strength(self, value):
"""Sets the creature's strength. If the value is invalid this will
raise an AssertionError"""
assert engine.is_valid_attribute(value)
self._strength = value
@property
def dexterity(self):
"""The creature's dextierity."""
return self._dexterity
@dexterity.setter
def dexterity(self, value):
"""Sets the creature's dexterity. If the value is invalid this will
raise an AssertionError"""
assert engine.is_valid_attribute(value)
self._dexterity = value
@property
def constitution(self):
"""Creature's constitution."""
if self.is_diseased and self._constitution > 1:
return self._constitution - 1
return self._constitution
@constitution.setter
def constitution(self, value):
"""Sets the creature's dexterity. If the value is invalid this will
raise an AssertionError"""
assert engine.is_valid_attribute(value)
self._constitution = value
class Zombie(Creature):
"""Zombie creature."""
def __init__(self, improvement=None):
"""Create a zombie.
A zombie uses the average stat array and then raises one stat to three.
        improvement -- one of 'strength', 'dexterity', or 'constitution' to improve
"""
strength = 2
dexterity = 2
constitution = 2
if not improvement:
improvement = random.choice(['strength', 'dexterity',
'constitution'])
name = "zombie"
if improvement == 'strength':
strength = 3
name = "strong zombie"
if improvement == 'dexterity':
dexterity = 3
name = "nimble zombie"
if improvement == 'constitution':
constitution = 3
name = "hale zombie"
self.attacks = [attack.make_bite(), attack.make_unarmed()]
self.sense_range = 15
self.score_value = 4000
super(Zombie, self).__init__('Z', name)
self.strength = strength
self.dexterity = dexterity
self.constitution = constitution
def act(self):
"""Zombies use the following decision tree:
1) If adjacent to the hero, bite her.
2) If within 15 steps of the hero, move towards her.
3) Otherwise, move randomly."""
super(Zombie, self).act()
return ai_act(self)
class ZombieDog(Creature):
"""Zombie dog."""
def __init__(self):
self.attacks = [attack.make_bite(effectiveness=70)]
self.sense_range = 30
super(ZombieDog, self).__init__('d', 'zombie dog')
self.strength = 2
self.dexterity = 3
self.constitution = 1
self.score_value = 2000
def act(self):
"""Zombie dogs use the following decision tree:
1) If adjacent to the hero, bite her.
2) If within 30 steps of the hero, move towards her.
3) Otherwise, move randomly."""
super(ZombieDog, self).act()
return ai_act(self)
class Cop(Creature):
"""Cop."""
def __init__(self):
super(Cop, self).__init__('C', 'cop')
self.attacks = []
self.sense_range = 1
self.strength = 3
self.dexterity = 1
self.constitution = 3
self.score_value = 8000
def act(self):
"""Cops use the following decision tree:
1) If adjacent to the hero, punch her.
2) If within 30 steps of the hero, move towards her.
3) Otherwise, move randomly."""
super(Cop, self).act()
return 6
def ai_act(creature):
"""Act as a creature.
Creatures use the following decision tree:
1) If adjacent to the hero, bite her.
    2) If within its sense range of the hero, move towards her.
3) Otherwise, move randomly.
"""
assert creature.loc
dist = creature.loc.distance_to(the.world.hero_location)
LOG.info("%r is %r from the hero.", creature.name, dist)
# 1) If adjacent to the hero, bite her.
if dist < 2:
return combat.attack(creature, the.hero,
random.choice(creature.attacks))
    # 2) If within sense range of the hero, move towards her.
elif dist < creature.sense_range:
delta = creature.loc.delta_to(the.world.hero_location)
# 3) Otherwise, move randomly.
else:
delta = world.random_delta()
LOG.info("%r wants to move from %r by %r.", creature.name,
creature.loc, delta)
cost = the.world.move_creature(creature.loc, delta)
if not cost == 0:
return cost
return 6 # If the creature fails to move, it stands around a while
def create_from_glyph(glyph):
"""Create a creature based on a glyph."""
monster = None
if glyph == 'Z':
monster = Zombie()
if glyph == 'd':
monster = ZombieDog()
if glyph == 'C':
monster = Cop()
return monster
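
# Editor's sketch (not part of the original module): illustrates the
# glyph-based factory above. Only the glyphs handled by create_from_glyph
# ('Z', 'd', 'C') produce creatures; any other glyph yields None.
def _example_spawn_from_glyphs():
    spawned = [create_from_glyph(glyph) for glyph in "ZdC?"]
    return [c.name for c in spawned if c is not None]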
| abeing/droog | droog/creature.py | Python | gpl-2.0 | 7,412 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for tokenizing SQL."""
import sqlparse
def _is_whitespace(sqlparse_token):
return sqlparse_token.ttype == sqlparse.tokens.Whitespace
def tokenize_sql(sql_exp):
  """Tokenizes a SQL string into lowercase tokens, dropping whitespace."""
  sql_exp = sql_exp.lower()
sql_exp = sql_exp.rstrip(";")
parse = sqlparse.parse(sql_exp)
sql = parse[0]
flat_tokens = sql.flatten()
sql_tokens = [
token.value for token in flat_tokens if not _is_whitespace(token)
]
return sql_tokens
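
# Editor's sketch (not part of the original file): tokenize_sql lowercases the
# query, drops a trailing semicolon, and keeps the non-whitespace tokens
# produced by sqlparse; for example the call below is expected to give
# ['select', 'name', 'from', 'singer', 'where', 'age', '>', '20'].
def _example_tokenize_sql():
  return tokenize_sql("SELECT name FROM singer WHERE age > 20;")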
| google-research/language | language/compgen/nqg/tasks/spider/sql_tokenizer.py | Python | apache-2.0 | 1,049 |
import numpy as np
import sys
from trw_utils import *
from heterogenous_crf import inference_gco
from pyqpbo import binary_general_graph
from scipy.optimize import fmin_l_bfgs_b
def trw(node_weights, edges, edge_weights, y,
        max_iter=100, verbose=0, tol=1e-3,
        get_energy=None):
    """Dual decomposition for a multi-label pairwise energy: each state is
    relaxed to a binary subproblem solved with QPBO, coordinated through node
    multipliers (optimized with L-BFGS) and a subgradient update on mu."""
n_nodes, n_states = node_weights.shape
n_edges = edges.shape[0]
y_hat = []
lambdas = np.zeros(n_nodes)
mu = np.zeros((n_nodes, n_states))
learning_rate = 0.1
energy_history = []
primal_history = []
pairwise = []
for k in xrange(n_states):
y_hat.append(np.zeros(n_states))
_pairwise = np.zeros((n_edges, 2, 2))
for i in xrange(n_edges):
_pairwise[i,1,0] = _pairwise[i,0,1] = -0.5 * edge_weights[i,k,k]
pairwise.append(_pairwise)
for i in xrange(n_edges):
e1, e2 = edges[i]
node_weights[e1,:] += 0.5 * np.diag(edge_weights[i,:,:])
node_weights[e2,:] += 0.5 * np.diag(edge_weights[i,:,:])
for iteration in xrange(max_iter):
dmu = np.zeros((n_nodes, n_states))
unaries = node_weights + mu
x, f_val, d = fmin_l_bfgs_b(f, np.zeros(n_nodes),
args=(unaries, pairwise, edges),
maxiter=50,
pgtol=1e-5)
E = np.sum(x)
for k in xrange(n_states):
new_unaries = np.zeros((n_nodes, 2))
new_unaries[:,1] = unaries[:,k] + x
y_hat[k], energy = binary_general_graph(edges, new_unaries, pairwise[k])
E -= 0.5*energy
dmu[:,k] -= y_hat[k]
y_hat_kappa, energy = optimize_kappa(y, mu, 1, n_nodes, n_states)
E += energy
dmu[np.ogrid[:dmu.shape[0]], y_hat_kappa] += 1
mu -= learning_rate * dmu
energy_history.append(E)
lambda_sum = np.zeros((n_nodes, n_states))
for k in xrange(n_states):
lambda_sum[:,k] = y_hat[k]
lambda_sum = lambda_sum / np.sum(lambda_sum, axis=1, keepdims=True)
if get_energy is not None:
primal = get_energy(get_labelling(lambda_sum))
primal_history.append(primal)
else:
primal = 0
if iteration:
learning_rate = 1. / np.sqrt(iteration)
if verbose:
print 'Iteration {}: energy={}, primal={}'.format(iteration, E, primal)
if iteration > 0 and np.abs(E - energy_history[-2]) < tol:
if verbose:
print 'Converged'
break
info = {'primal': primal_history,
'dual': energy_history,
'iteration': iteration}
return lambda_sum, y_hat_kappa, info
def f(x, node_weights, pairwise, edges):
    """Negated dual objective and its gradient w.r.t. the node multipliers x,
    in the minimization form expected by fmin_l_bfgs_b."""
    n_nodes, n_states = node_weights.shape
dual = 0
dlambda = np.zeros(n_nodes)
for k in xrange(n_states):
new_unaries = np.zeros((n_nodes, 2))
new_unaries[:,1] = node_weights[:,k] + x
y_hat, energy = binary_general_graph(edges, new_unaries, pairwise[k])
dual += 0.5 * energy
dlambda += y_hat
dlambda -= 1
dual -= np.sum(x)
#print dual
return -dual, -dlambda
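
# Editor's sketch (not part of the original file): the argument shapes that
# trw() expects, inferred from the code above. Only input construction is
# shown, since actually running trw() requires the pyqpbo extension.
def _example_trw_inputs(n_nodes=4, n_states=3):
    node_weights = np.random.rand(n_nodes, n_states)
    edges = np.array([[i, i + 1] for i in range(n_nodes - 1)])  # a simple chain
    edge_weights = np.random.rand(edges.shape[0], n_states, n_states)
    y = np.zeros(n_nodes, dtype=int)  # reference labelling used by optimize_kappa
    return node_weights, edges, edge_weights, y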
| kondra/latent_ssvm | smd.py | Python | bsd-2-clause | 3,199 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# standard copy right text
#
# Initial version: 2012-04-02
# Author: Amnon Janiv
"""
.. module:: samples
:synopsis: Explore equity master package
Demonstrates how file-related processes are pipelined between
cooperating processes using input and output queues.
.. moduleauthor:: Amnon Janiv
"""
import os
import sys
from optparse import OptionParser
root_dir = '../demo/'
log_dir = 'logs/'
wf_dir = 'wf/'
data_dir = '../data/'
input_dir = data_dir + 'input/'
data_files = (
'vendor_a_eq.txt',
'vendor_b_eq.txt'
)
def fix_path():
"""
Configure python sys.path for standalone execution
"""
_cwd = os.path.dirname(__file__)
_app_root = os.path.abspath(os.path.join(_cwd, '..'))
if not _app_root in sys.path:
sys.path.append(_app_root)
try:
import equity_master as eqm
except ImportError:
fix_path()
import equity_master as eqm
#
# Leverage the data configuration environment
# One can envision a process where these
# configuration files are detected dynamically.
#
from data_config import vendor_a
from data_config import vendor_b
from data_config import my_firm
def detection_req(wfdir):
"""Helper function for creating file detection requests
"""
fn = eqm.create_file_detection_request
file_descs = (vendor_a.in_eq_def_file_desc,
vendor_b.in_eq_def_file_desc)
reqs = [fn(eqm.WAIT,
file_desc,
file_desc,
wfdir)
for file_desc in file_descs]
return reqs
def validation_req(wfdir):
"""
Create a validation request
"""
fn = eqm.create_file_validation_request
in_desc = (vendor_a.in_eq_def_file_desc,
vendor_b.in_eq_def_file_desc)
out_desc = (vendor_a.out_eq_def_file_desc,
vendor_b.out_eq_def_file_desc)
reqs = [fn(eqm.ARRIVED,
in_d,
out_d,
wfdir)
for in_d, out_d in zip(in_desc, out_desc)]
return reqs
def sort_req(wfdir):
"""
Create a file sort request
"""
fn = eqm.create_file_sort_request
in_desc = (vendor_a.out_eq_def_file_desc,
vendor_b.out_eq_def_file_desc)
out_desc = (vendor_a.out_eq_def_file_desc,
vendor_b.out_eq_def_file_desc)
reqs = [fn(eqm.VALIDATED,
in_d,
out_d,
wfdir)
for in_d, out_d in zip(in_desc, out_desc)]
return reqs
def merge_req(wfdir):
"""
Create a file merge request
"""
fn = eqm.create_file_merge_request
input_file_descs = (vendor_a.out_eq_def_file_desc,
vendor_b.out_eq_def_file_desc)
request = fn(eqm.SORTED,
input_file_descs,
my_firm.eq_def_file_desc,
wfdir)
return (request,)
def dispatch(proc, reqs):
"""
Send/receive request/response
"""
resp = []
for index in range(len(reqs)):
proc.send(reqs[index])
resp.append(proc.receive())
return resp
def exec_workflow(
parser,
options,
args,
):
"""
    Execute command line options related to the file feed process
"""
print ('workflow ->', ' '.join(sys.argv))
wf = eqm.create_workflow(options.root_dir,
options.wf_dir,
options.log_dir,
options.data_dir,
data_files)
    print ('workflow is using directory (%s)' % wf.dir)
def detect_st():
proc = eqm.create_file_detection_process()
reqs = detection_req(wf.dir)
return proc, reqs
def validate_st():
proc = eqm.create_file_validation_process()
reqs = validation_req(wf.dir)
return proc, reqs
def sort_st():
proc = eqm.create_file_sort_process()
reqs = sort_req(wf.dir)
return proc, reqs
def merge_st():
proc = eqm.create_file_merge_process()
reqs = merge_req(wf.dir)
return proc, reqs
tmpl = 'process is now in state (%s) for file (%s)'
for state in (detect_st,validate_st, sort_st, merge_st):
proc, reqs = state()
proc.start()
responses = dispatch (proc, reqs)
assert (len(responses) == len(reqs))
for result in responses:
assert(result.error is None)
proc_st = eqm.process_state(result.response.file_state)
print (tmpl % (proc_st.name,
result.response.file_name))
proc.shutdown()
proc.join()
def setup():
"""
Setup workflow execution environment
"""
    parser = OptionParser(usage='%prog [-d data_dir] [-r root_dir] [-w workflow_dir] [-l log_dir]',
                          version='%prog 1.0')
parser.add_option('-d', '--data_dir', action='store', dest='data_dir',
default=input_dir, help="data files directory")
parser.add_option('-r', '--root_dir', action='store', dest='root_dir',
default=root_dir, help="demo root directory")
parser.add_option('-w', '--workflow_dir', action='store', dest='wf_dir',
default=wf_dir, help="workflow root directory")
parser.add_option('-l', '--log_dir', action='store', dest='log_dir',
default=log_dir, help="log directory")
return parser
def main():
parser= setup()
(options, args) = parser.parse_args()
exec_workflow(parser, options, args)
if __name__ == '__main__':
main()
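
# Editor's note (not part of the original script): a typical standalone run,
# assuming the demo directory layout configured above already exists:
#
#   python samples.py -d ../data/input/ -r ../demo/ -w wf/ -l logs/
#
# All four options default to the module-level constants, so `python samples.py`
# alone exercises the same detect -> validate -> sort -> merge pipeline.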
| ajaniv/equitymaster | scripts/samples.py | Python | gpl-2.0 | 5,763 |
# -*- coding: utf-8 -*-
import threading
import re
import sys
import six
from six import string_types
# Python3 queue support.
try:
import Queue
except ImportError:
import queue as Queue
from telebot import logger
class WorkerThread(threading.Thread):
count = 0
def __init__(self, exception_callback=None, queue=None, name=None):
if not name:
name = "WorkerThread{0}".format(self.__class__.count + 1)
self.__class__.count += 1
if not queue:
queue = Queue.Queue()
threading.Thread.__init__(self, name=name)
self.queue = queue
self.daemon = True
self.received_task_event = threading.Event()
self.done_event = threading.Event()
self.exception_event = threading.Event()
self.continue_event = threading.Event()
self.exception_callback = exception_callback
self.exc_info = None
self._running = True
self.start()
def run(self):
while self._running:
try:
task, args, kwargs = self.queue.get(block=True, timeout=.5)
self.continue_event.clear()
self.received_task_event.clear()
self.done_event.clear()
self.exception_event.clear()
logger.debug("Received task")
self.received_task_event.set()
task(*args, **kwargs)
logger.debug("Task complete")
self.done_event.set()
except Queue.Empty:
pass
except:
logger.debug("Exception occurred")
self.exc_info = sys.exc_info()
self.exception_event.set()
if self.exception_callback:
self.exception_callback(self, self.exc_info)
self.continue_event.wait()
def put(self, task, *args, **kwargs):
self.queue.put((task, args, kwargs))
def raise_exceptions(self):
if self.exception_event.is_set():
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def clear_exceptions(self):
self.exception_event.clear()
self.continue_event.set()
def stop(self):
self._running = False
class ThreadPool:
def __init__(self, num_threads=2):
self.tasks = Queue.Queue()
self.workers = [WorkerThread(self.on_exception, self.tasks) for _ in range(num_threads)]
self.num_threads = num_threads
self.exception_event = threading.Event()
self.exc_info = None
def put(self, func, *args, **kwargs):
self.tasks.put((func, args, kwargs))
def on_exception(self, worker_thread, exc_info):
self.exc_info = exc_info
self.exception_event.set()
worker_thread.continue_event.set()
def raise_exceptions(self):
if self.exception_event.is_set():
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
def clear_exceptions(self):
self.exception_event.clear()
def close(self):
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.join()
class AsyncTask:
def __init__(self, target, *args, **kwargs):
self.target = target
self.args = args
self.kwargs = kwargs
self.done = False
self.thread = threading.Thread(target=self._run)
self.thread.start()
def _run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except:
self.result = sys.exc_info()
self.done = True
    def wait(self):
        if not self.done:
            self.thread.join()
        # _run() stores sys.exc_info() (a 3-tuple) when the target raised, so
        # detect that tuple instead of testing for a bare exception instance.
        if isinstance(self.result, tuple) and len(self.result) == 3 and \
                isinstance(self.result[1], BaseException):
            six.reraise(self.result[0], self.result[1], self.result[2])
        else:
            return self.result
def async():
def decorator(fn):
def wrapper(*args, **kwargs):
return AsyncTask(fn, *args, **kwargs)
return wrapper
return decorator
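
# Editor's sketch (not part of the original module): the async() decorator
# wraps a callable so each call runs on its own thread and returns an
# AsyncTask whose result is retrieved with .wait().
def _example_async_usage():
    @async()
    def _slow_add(a, b):
        return a + b
    task = _slow_add(2, 3)  # returns immediately with an AsyncTask
    return task.wait()      # blocks until the thread finishes, then returns 5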
def is_string(var):
return isinstance(var, string_types)
def is_command(text):
"""
Checks if `text` is a command. Telegram chat commands start with the '/' character.
:param text: Text to check.
:return: True if `text` is a command, else False.
"""
return text.startswith('/')
def extract_command(text):
"""
Extracts the command from `text` (minus the '/') if `text` is a command (see is_command).
If `text` is not a command, this function returns None.
Examples:
extract_command('/help'): 'help'
extract_command('/help@BotName'): 'help'
extract_command('/search black eyed peas'): 'search'
extract_command('Good day to you'): None
:param text: String to extract the command from
:return: the command if `text` is a command (according to is_command), else None.
"""
return text.split()[0].split('@')[0][1:] if is_command(text) else None
def split_string(text, chars_per_string):
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
:param text: The text to split
:param chars_per_string: The number of characters per line the text is split into.
    :return: The split text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
# CREDITS TO http://stackoverflow.com/questions/12317940#answer-12320352
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def orify(e, changed_callback):
e._set = e.set
e._clear = e.clear
e.changed = changed_callback
e.set = lambda: or_set(e)
e.clear = lambda: or_clear(e)
def OrEvent(*events):
or_event = threading.Event()
def changed():
bools = [e.is_set() for e in events]
if any(bools):
or_event.set()
else:
or_event.clear()
def busy_wait():
while not or_event.is_set():
or_event._wait(3)
for e in events:
orify(e, changed)
or_event._wait = or_event.wait
or_event.wait = busy_wait
changed()
return or_event
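
# Editor's sketch (not part of the original module): OrEvent combines several
# threading.Event objects into one that is set whenever any input is set,
# which lets a caller wait on multiple worker signals at once.
def _example_or_event():
    first, second = threading.Event(), threading.Event()
    combined = OrEvent(first, second)
    second.set()
    return combined.is_set()  # True, because one of the inputs is set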
def extract_arguments(text):
"""
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the arguments from a command
:return: the arguments if `text` is a command (according to is_command), else None.
"""
    regexp = re.compile(r"\/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
result = regexp.match(text)
return result.group(2) if is_command(text) else None
| dzmuh97/OpenOrioks | telebot/util.py | Python | gpl-3.0 | 7,051 |
../../../../../share/pyshared/keyring/tests/test_cli.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/keyring/tests/test_cli.py | Python | gpl-3.0 | 55 |