code | repo_name | path | language | license | size
---|---|---|---|---|---|
import smtplib
from email.mime.text import MIMEText
def send(config, message):
sender = config.email
password = config.password
recipients = config.recipients
session = build_session(sender, password)
message = build_message(sender, recipients, message)
session.sendmail(sender, recipients, message.as_string())
session.quit()
def build_session(user, password):
session = smtplib.SMTP('smtp.gmail.com', 587)
session.ehlo()
session.starttls()
session.login(user, password)
return session
def build_message(sender, recipients, message):
msg = MIMEText(message, 'html')
msg['Subject'] = 'Toppit Update'
msg['From'] = sender
msg['To'] = ", ".join(recipients)
return msg
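# Illustrative usage sketch: `send` only needs a config object exposing
# `email`, `password` and `recipients`; the namedtuple below is a
# hypothetical stand-in for toppit's real config, not part of the module.
if __name__ == '__main__':
    from collections import namedtuple
    Config = namedtuple('Config', ['email', 'password', 'recipients'])
    demo = Config('sender@gmail.com', 'app-password', ['reader@example.com'])
    send(demo, '<p>Example <b>Toppit</b> update.</p>')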
| twbarber/toppit | toppit/mailer.py | Python | mit | 741 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import re
import unicodedata
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import ustr
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
"""Suboptimal-but-better-than-nothing way to replace accented
latin letters by an ASCII equivalent. Will obviously change the
meaning of input_str and work only for some cases"""
input_str = ustr(input_str)
nkfd_form = unicodedata.normalize('NFKD', input_str)
return u''.join([c for c in nkfd_form if not unicodedata.combining(c)])
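# e.g. (illustrative): remove_accents(u'Réception') returns u'Reception';
# characters with no NFKD decomposition (such as u'ß') pass through unchanged.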
class Alias(models.Model):
"""A Mail Alias is a mapping of an email address with a given OpenERP Document
model. It is used by OpenERP's mail gateway when processing incoming emails
sent to the system. If the recipient address (To) of the message matches
a Mail Alias, the message will be processed following the rules
of that alias. If the message is a reply it will be attached to the
existing discussion on the corresponding record, otherwise a new
record of the corresponding model will be created.
This is meant to be used in combination with a catch-all email configuration
on the company's mail server, so that as soon as a new mail.alias is
created, it becomes immediately usable and OpenERP will accept email for it.
"""
_name = 'mail.alias'
_description = "Email Aliases"
_rec_name = 'alias_name'
_order = 'alias_model_id, alias_name'
alias_name = fields.Char('Alias Name', help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <[email protected]>")
alias_model_id = fields.Many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
help="The model (Odoo Document Kind) to which this alias "
"corresponds. Any incoming email that does not reply to an "
"existing record will cause the creation of a new record "
"of this model (e.g. a Project Task)",
# hack to only allow selecting mail_thread models (we might
# have a few false positives, though)
domain="[('field_id.name', '=', 'message_ids')]")
alias_user_id = fields.Many2one('res.users', 'Owner', default=lambda self: self.env.user,
help="The owner of records created upon receiving emails on this alias. "
"If this field is not set the system will attempt to find the right owner "
"based on the sender (From) address, or will use the Administrator account "
"if no system user is found for that address.")
alias_defaults = fields.Text('Default Values', required=True, default='{}',
help="A Python dictionary that will be evaluated to provide "
"default values when creating new records for this alias.")
alias_force_thread_id = fields.Integer(
'Record Thread ID',
help="Optional ID of a thread (record) to which all incoming messages will be attached, even "
"if they did not reply to it. If set, this will disable the creation of new records completely.")
alias_domain = fields.Char('Alias domain', compute='_get_alias_domain',
default=lambda self: self.env["ir.config_parameter"].get_param("mail.catchall.domain"))
alias_parent_model_id = fields.Many2one(
'ir.model', 'Parent Model',
help="Parent model holding the alias. The model holding the alias reference "
"is not necessarily the model given by alias_model_id "
"(example: project (parent_model) and task (model))")
alias_parent_thread_id = fields.Integer('Parent Record Thread ID', help="ID of the parent record holding the alias (example: project holding the task creation alias)")
alias_contact = fields.Selection([
('everyone', 'Everyone'),
('partners', 'Authenticated Partners'),
('followers', 'Followers only')], default='everyone',
string='Alias Contact Security', required=True,
help="Policy to post a message on the document using the mailgateway.\n"
"- everyone: everyone can post\n"
"- partners: only authenticated partners\n"
"- followers: only followers of the related document or members of following channels\n")
_sql_constraints = [
('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
]
@api.multi
def _get_alias_domain(self):
alias_domain = self.env["ir.config_parameter"].get_param("mail.catchall.domain")
for record in self:
record.alias_domain = alias_domain
@api.one
@api.constrains('alias_defaults')
def _check_alias_defaults(self):
try:
dict(safe_eval(self.alias_defaults))
except Exception:
raise ValidationError(_('Invalid expression, it must be a literal python dictionary definition e.g. "{\'field\': \'value\'}"'))
@api.model
def create(self, vals):
""" Creates an email.alias record according to the values provided in ``vals``,
with 2 alterations: the ``alias_name`` value may be suffixed in order to
make it unique (and certain unsafe characters replaced), and
he ``alias_model_id`` value will set to the model ID of the ``model_name``
context value, if provided.
"""
model_name = self._context.get('alias_model_name')
parent_model_name = self._context.get('alias_parent_model_name')
if vals.get('alias_name'):
vals['alias_name'] = self._clean_and_make_unique(vals.get('alias_name'))
if model_name:
model = self.env['ir.model'].search([('model', '=', model_name)])
vals['alias_model_id'] = model.id
if parent_model_name:
model = self.env['ir.model'].search([('model', '=', parent_model_name)])
vals['alias_parent_model_id'] = model.id
return super(Alias, self).create(vals)
@api.multi
def write(self, vals):
""""give a unique alias name if given alias name is already assigned"""
if vals.get('alias_name') and self.ids:
vals['alias_name'] = self._clean_and_make_unique(vals.get('alias_name'), alias_ids=self.ids)
return super(Alias, self).write(vals)
@api.multi
def name_get(self):
"""Return the mail alias display alias_name, including the implicit
mail catchall domain if exists from config otherwise "New Alias".
e.g. `[email protected]` or `jobs` or 'New Alias'
"""
res = []
for record in self:
if record.alias_name and record.alias_domain:
res.append((record['id'], "%s@%s" % (record.alias_name, record.alias_domain)))
elif record.alias_name:
res.append((record['id'], "%s" % (record.alias_name)))
else:
res.append((record['id'], _("Inactive Alias")))
return res
@api.model
def _find_unique(self, name, alias_ids=False):
"""Find a unique alias name similar to ``name``. If ``name`` is
already taken, make a variant by adding an integer suffix until
an unused alias is found.
"""
sequence = None
while True:
new_name = "%s%s" % (name, sequence) if sequence is not None else name
domain = [('alias_name', '=', new_name)]
if alias_ids:
domain += [('id', 'not in', alias_ids)]
if not self.search(domain):
break
sequence = (sequence + 1) if sequence else 2
return new_name
@api.model
def _clean_and_make_unique(self, name, alias_ids=False):
# when an alias name appears to already be an email, we keep the local part only
name = remove_accents(name).lower().split('@')[0]
name = re.sub(r'[^\w+.]+', '-', name)
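# e.g. 'Sales & Marketing@example.com' becomes 'sales-marketing' before uniquification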
return self._find_unique(name, alias_ids=alias_ids)
@api.multi
def open_document(self):
if not self.alias_model_id or not self.alias_force_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': self.alias_model_id.model,
'res_id': self.alias_force_thread_id,
'type': 'ir.actions.act_window',
}
@api.multi
def open_parent_document(self):
if not self.alias_parent_model_id or not self.alias_parent_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': self.alias_parent_model_id.model,
'res_id': self.alias_parent_thread_id,
'type': 'ir.actions.act_window',
}
class AliasMixin(models.AbstractModel):
""" A mixin for models that inherits mail.alias. This mixin initializes the
alias_id column in database, and manages the expected one-to-one
relation between your model and mail aliases.
"""
_name = 'mail.alias.mixin'
_inherits = {'mail.alias': 'alias_id'}
alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True)
def get_alias_model_name(self, vals):
""" Return the model name for the alias. Incoming emails that are not
replies to existing records will cause the creation of a new record
of this alias model. The value may depend on ``vals``, the dict of
values passed to ``create`` when a record of this model is created.
"""
return None
def get_alias_values(self):
""" Return values to create an alias, or to write on the alias after its
creation.
"""
return {'alias_parent_thread_id': self.id}
@api.model
def create(self, vals):
""" Create a record with ``vals``, and create a corresponding alias. """
record = super(AliasMixin, self.with_context(
alias_model_name=self.get_alias_model_name(vals),
alias_parent_model_name=self._name,
)).create(vals)
record.alias_id.sudo().write(record.get_alias_values())
return record
@api.multi
def unlink(self):
""" Delete the given records, and cascade-delete their corresponding alias. """
aliases = self.mapped('alias_id')
res = super(AliasMixin, self).unlink()
aliases.unlink()
return res
@api.model_cr_context
def _init_column(self, name):
""" Create aliases for existing rows. """
super(AliasMixin, self)._init_column(name)
if name != 'alias_id':
return
alias_ctx = {
'alias_model_name': self.get_alias_model_name({}),
'alias_parent_model_name': self._name,
}
alias_model = self.env['mail.alias'].sudo().with_context(alias_ctx).browse([])
child_ctx = {
'active_test': False, # retrieve all records
'prefetch_fields': False, # do not prefetch fields on records
}
child_model = self.sudo().with_context(child_ctx).browse([])
for record in child_model.search([('alias_id', '=', False)]):
# create the alias, and link it to the current record
alias = alias_model.create(record.get_alias_values())
record.with_context({'mail_notrack': True}).alias_id = alias
_logger.info('Mail alias created for %s %s (id %s)',
record._name, record.display_name, record.id)
| chienlieu2017/it_management | odoo/addons/mail/models/mail_alias.py | Python | gpl-3.0 | 12,189 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ConfigureContactSettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-domains
# [START domains_v1_generated_Domains_ConfigureContactSettings_sync]
from google.cloud import domains_v1
def sample_configure_contact_settings():
# Create a client
client = domains_v1.DomainsClient()
# Initialize request argument(s)
request = domains_v1.ConfigureContactSettingsRequest(
registration="registration_value",
)
# Make the request
operation = client.configure_contact_settings(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END domains_v1_generated_Domains_ConfigureContactSettings_sync]
| googleapis/python-domains | samples/generated_samples/domains_v1_generated_domains_configure_contact_settings_sync.py | Python | apache-2.0 | 1,593 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
def bbox_transform(ex_rois, gt_rois):
"""
computes the distance from ground-truth boxes to the given boxes, normed by their size
:param ex_rois: n * 4 numpy array, given boxes
:param gt_rois: n * 4 numpy array, ground-truth boxes
:return: deltas: n * 4 numpy array, regression targets from ex_rois to gt_rois
"""
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
# assert np.min(ex_widths) > 0.1 and np.min(ex_heights) > 0.1, \
# 'Invalid boxes found: {} {}'. \
# format(ex_rois[np.argmin(ex_widths), :], ex_rois[np.argmin(ex_heights), :])
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
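# Worked example (illustrative): a 10x10 box at the origin matched against the
# same box shifted 5 px to the right yields dx = 0.5 (half its width) and zero
# log-scale terms:
#   bbox_transform(np.array([[0., 0., 9., 9.]]), np.array([[5., 0., 14., 9.]]))
#   # -> array([[0.5, 0., 0., 0.]])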
def bbox_transform_inv(boxes, deltas):
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)  # keep 2D so callers can slice columns
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
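# Note: bbox_transform_inv inverts the (dx, dy, dw, dh) parameterization above;
# applying it to the output of bbox_transform recovers the ground-truth centers
# and sizes exactly (only the "+ 1.0" corner convention differs).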
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
"""
if boxes.shape[0] == 0:
return boxes
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
| adityaarun1/faster_rcnn_pytorch | faster_rcnn/fast_rcnn/bbox_transform.py | Python | mit | 3,098 |
from __future__ import unicode_literals
from django.core.urlresolvers import resolve
from django.contrib.auth.middleware import AuthenticationMiddleware as DjangoAuthenticationMiddleware
from django.contrib.auth import get_user
from django.http.response import HttpResponseRedirect
from is_core.exceptions import HttpRedirectException
class RequestKwargsMiddleware(object):
def process_request(self, request):
request.kwargs = resolve(request.path).kwargs
class HttpExceptionsMiddleware(object):
def process_exception(self, request, exception):
if isinstance(exception, HttpRedirectException):
return HttpResponseRedirect(exception.url)
| vojtatranta/django-is-core | is_core/middleware/__init__.py | Python | lgpl-3.0 | 681 |
import sys, urllib2
import xbmcplugin,xbmcgui
import xml.etree.ElementTree as ET
__addon__ = "SomaFM"
__addonid__ = "plugin.audio.somafm"
__version__ = "0.0.2"
def log(msg):
print "[PLUGIN] '%s (%s)' " % (__addon__, __version__) + str(msg)
log("Initialized!")
log(sys.argv)
rootURL = "http://somafm.com/"
#pluginPath = sys.argv[0]
handle = int(sys.argv[1])
query = sys.argv[2]
def getHeaders(withReferrer=None):
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3'
if withReferrer:
headers['Referer'] = withReferrer  # note: the HTTP header name is spelled 'Referer'
return headers
def getHTMLFor(url, withData=None, withReferrer=None):
url = rootURL + url
log("Get HTML for URL: " + url)
req = urllib2.Request(url, withData, getHeaders(withReferrer))
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
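# e.g. getHTMLFor("channels.xml") fetches http://somafm.com/channels.xml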
def addEntries():
somaXML = getHTMLFor(url="channels.xml")
channelsContainer = ET.fromstring(somaXML)
for stations in channelsContainer.findall(".//channel"):
title = stations.find('title').text
description = stations.find('description').text
if stations.find('largeimage') is not None:
img = rootURL + stations.find('largeimage').text.replace(rootURL,"")
else:
img = rootURL + stations.find('image').text.replace(rootURL,"")
url = rootURL + stations.find('fastpls').text.replace(rootURL,"")
log(title)
log(description)
log(img)
log(url)
li = xbmcgui.ListItem(title, description, thumbnailImage=img)
li.setProperty("IsPlayable","true")
xbmcplugin.addDirectoryItem(
handle=handle,
url=url,
listitem=li)
addEntries()
xbmcplugin.endOfDirectory(handle)
| nils-werner/xbmc-somafm | default.py | Python | gpl-2.0 | 1,907 |
# -*- coding: utf-8 -*-
"""Tests for tmuxp.
tmuxp.tests
~~~~~~~~~~~
"""
from __future__ import absolute_import, division, print_function, \
with_statement, unicode_literals
import logging
import sys
import pkgutil
try:
import unittest2 as unittest
except ImportError: # Python 2.7
import unittest
from .. import log
from .._compat import string_types, PY2, reraise
from ..server import Server
t = Server()
t.socket_name = 'tmuxp_test'
from . import helpers
# Logger functionality
logger = logging.getLogger()
if not logger.handlers:
channel = logging.StreamHandler()
channel.setFormatter(log.DebugLogFormatter())
logger.addHandler(channel)
logger.setLevel('INFO')
# enable DEBUG message if channel is at testsuite + testsuite.* packages.
testsuite_logger = logging.getLogger(__name__)
testsuite_logger.setLevel('INFO')
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# XXX: py3 review needed
assert isinstance(import_name, string_types)
# force the import name to automatically convert to strings
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if PY2 and isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError as e:
if not silent:
reraise(
ImportStringError,
ImportStringError(import_name, e),
sys.exc_info()[2])
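# For instance (illustrative): import_string('os.path:join') and
# import_string('os.path.join') both return the os.path.join function.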
def find_modules(import_path, include_packages=False, recursive=False):
"""Find all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_name: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
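# e.g. (illustrative): list(find_modules('logging')) yields 'logging.config'
# and 'logging.handlers'; sub-packages are listed only when include_packages=True.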
def iter_suites(package):
"""Yields all testsuites."""
for module in find_modules(package, include_packages=True):
mod = __import__(module, fromlist=['*'])
if hasattr(mod, 'suite'):
yield mod.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def suite():
"""A testsuite that has all the Flask tests. You can use this
function to integrate the Flask tests into your own testsuite
in case you want to test that monkeypatches to Flask do not
break it.
"""
suite = unittest.TestSuite()
for other_suite in iter_suites(__name__):
suite.addTest(other_suite)
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception:
import sys
import traceback
traceback.print_exc()
sys.exit(1)
| madelynfreed/rlundo | venv/lib/python2.7/site-packages/tmuxp/testsuite/__init__.py | Python | gpl-3.0 | 8,102 |
from __future__ import print_function, division
from sympy.core.containers import Tuple
from sympy.core.compatibility import range
from types import FunctionType
class TableForm(object):
r"""
Create a nice table representation of data.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]])
>>> print(t)
5 7
4 2
10 3
You can use the SymPy's printing system to produce tables in any
format (ascii, latex, html, ...).
>>> print(t.as_latex())
\begin{tabular}{l l}
$5$ & $7$ \\
$4$ & $2$ \\
$10$ & $3$ \\
\end{tabular}
"""
def __init__(self, data, **kwarg):
"""
Creates a TableForm.
Parameters:
data ...
2D data to be put into the table; data can be
given as a Matrix
headings ...
gives the labels for rows and columns:
Can be a single argument that applies to both
dimensions:
- None ... no labels
- "automatic" ... labels are 1, 2, 3, ...
Can be a list of labels for rows and columns:
The labels for each dimension can be given
as None, "automatic", or [l1, l2, ...] e.g.
["automatic", None] will number the rows
[default: None]
alignments ...
alignment of the columns with:
- "left" or "<"
- "center" or "^"
- "right" or ">"
When given as a single value, the value is used for
all columns. The row headings (if given) will be
right justified unless an explicit alignment is
given for it and all other columns.
[default: "left"]
formats ...
a list of format strings or functions that accept
3 arguments (entry, row number, col number) and
return a string for the table entry. (If a function
returns None then the _print method will be used.)
wipe_zeros ...
Don't show zeros in the table.
[default: True]
pad ...
the string to use to indicate a missing value (e.g.
elements that are None or those that are missing
from the end of a row (i.e. any row that is shorter
than the rest is assumed to have missing values).
When None, nothing will be shown for values that
are missing from the end of a row; values that are
None, however, will be shown.
[default: None]
Examples
========
>>> from sympy import TableForm, Matrix
>>> TableForm([[5, 7], [4, 2], [10, 3]])
5 7
4 2
10 3
>>> TableForm([list('.'*i) for i in range(1, 4)], headings='automatic')
| 1 2 3
---------
1 | .
2 | . .
3 | . . .
>>> TableForm([['.'*(j if not i%2 else 1) for i in range(3)]
... for j in range(4)], alignments='rcl')
.
. . .
.. . ..
... . ...
"""
from sympy import Symbol, S, Matrix
from sympy.core.sympify import SympifyError
# We only support 2D data. Check the consistency:
if isinstance(data, Matrix):
data = data.tolist()
_w = len(data[0])
_h = len(data)
# fill out any short lines
pad = kwarg.get('pad', None)
ok_None = False
if pad is None:
pad = " "
ok_None = True
pad = Symbol(pad)
_w = max(len(line) for line in data)
for i, line in enumerate(data):
if len(line) != _w:
line.extend([pad]*(_w - len(line)))
for j, lj in enumerate(line):
if lj is None:
if not ok_None:
lj = pad
else:
try:
lj = S(lj)
except SympifyError:
lj = Symbol(str(lj))
line[j] = lj
data[i] = line
_lines = Tuple(*data)
headings = kwarg.get("headings", [None, None])
if headings == "automatic":
_headings = [range(1, _h + 1), range(1, _w + 1)]
else:
h1, h2 = headings
if h1 == "automatic":
h1 = range(1, _h + 1)
if h2 == "automatic":
h2 = range(1, _w + 1)
_headings = [h1, h2]
allow = ('l', 'r', 'c')
alignments = kwarg.get("alignments", "l")
def _std_align(a):
a = a.strip().lower()
if len(a) > 1:
return {'left': 'l', 'right': 'r', 'center': 'c'}.get(a, a)
else:
return {'<': 'l', '>': 'r', '^': 'c'}.get(a, a)
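# e.g. _std_align('Right') -> 'r' and _std_align('^') -> 'c'; unrecognized
# values pass through unchanged and are rejected below.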
std_align = _std_align(alignments)
if std_align in allow:
_alignments = [std_align]*_w
else:
_alignments = []
for a in alignments:
std_align = _std_align(a)
_alignments.append(std_align)
if std_align not in ('l', 'r', 'c'):
raise ValueError('alignment "%s" unrecognized' %
alignments)
if _headings[0] and len(_alignments) == _w + 1:
_head_align = _alignments[0]
_alignments = _alignments[1:]
else:
_head_align = 'r'
if len(_alignments) != _w:
raise ValueError(
'wrong number of alignments: expected %s but got %s' %
(_w, len(_alignments)))
_column_formats = kwarg.get("formats", [None]*_w)
_wipe_zeros = kwarg.get("wipe_zeros", True)
self._w = _w
self._h = _h
self._lines = _lines
self._headings = _headings
self._head_align = _head_align
self._alignments = _alignments
self._column_formats = _column_formats
self._wipe_zeros = _wipe_zeros
def __repr__(self):
from .str import sstr
return sstr(self, order=None)
def __str__(self):
from .str import sstr
return sstr(self, order=None)
def as_matrix(self):
"""Returns the data of the table in Matrix form.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]], headings='automatic')
>>> t
| 1 2
--------
1 | 5 7
2 | 4 2
3 | 10 3
>>> t.as_matrix()
Matrix([
[ 5, 7],
[ 4, 2],
[10, 3]])
"""
from sympy import Matrix
return Matrix(self._lines)
def as_str(self):
# XXX obsolete ?
return str(self)
def as_latex(self):
from .latex import latex
return latex(self)
def _sympystr(self, p):
"""
Returns the string representation of 'self'.
Examples
========
>>> from sympy import TableForm
>>> t = TableForm([[5, 7], [4, 2], [10, 3]])
>>> s = t.as_str()
"""
column_widths = [0] * self._w
lines = []
for line in self._lines:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
s = str(line[i])
if self._wipe_zeros and (s == "0"):
s = " "
w = len(s)
if w > column_widths[i]:
column_widths[i] = w
new_line.append(s)
lines.append(new_line)
# Check heading:
if self._headings[0]:
self._headings[0] = [str(x) for x in self._headings[0]]
_head_width = max([len(x) for x in self._headings[0]])
if self._headings[1]:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
s = str(self._headings[1][i])
w = len(s)
if w > column_widths[i]:
column_widths[i] = w
new_line.append(s)
self._headings[1] = new_line
format_str = []
def _align(align, w):
return '%%%s%ss' % (
("-" if align == "l" else ""),
str(w))
format_str = [_align(align, w) for align, w in
zip(self._alignments, column_widths)]
if self._headings[0]:
format_str.insert(0, _align(self._head_align, _head_width))
format_str.insert(1, '|')
format_str = ' '.join(format_str) + '\n'
s = []
if self._headings[1]:
d = self._headings[1]
if self._headings[0]:
d = [""] + d
first_line = format_str % tuple(d)
s.append(first_line)
s.append("-" * (len(first_line) - 1) + "\n")
for i, line in enumerate(lines):
d = [l if self._alignments[j] != 'c' else
l.center(column_widths[j]) for j, l in enumerate(line)]
if self._headings[0]:
l = self._headings[0][i]
l = (l if self._head_align != 'c' else
l.center(_head_width))
d = [l] + d
s.append(format_str % tuple(d))
return ''.join(s)[:-1] # don't include trailing newline
def _latex(self, printer):
"""
Returns the string representation of 'self'.
"""
# Check heading:
if self._headings[1]:
new_line = []
for i in range(self._w):
# Format the item somehow if needed:
new_line.append(str(self._headings[1][i]))
self._headings[1] = new_line
alignments = []
if self._headings[0]:
self._headings[0] = [str(x) for x in self._headings[0]]
alignments = [self._head_align]
alignments.extend(self._alignments)
s = r"\begin{tabular}{" + " ".join(alignments) + "}\n"
if self._headings[1]:
d = self._headings[1]
if self._headings[0]:
d = [""] + d
first_line = " & ".join(d) + r" \\" + "\n"
s += first_line
s += r"\hline" + "\n"
for i, line in enumerate(self._lines):
d = []
for j, x in enumerate(line):
if self._wipe_zeros and (x in (0, "0")):
d.append(" ")
continue
f = self._column_formats[j]
if f:
if isinstance(f, FunctionType):
v = f(x, i, j)
if v is None:
v = printer._print(x)
else:
v = f % x
d.append(v)
else:
v = printer._print(x)
d.append("$%s$" % v)
if self._headings[0]:
d = [self._headings[0][i]] + d
s += " & ".join(d) + r" \\" + "\n"
s += r"\end{tabular}"
return s
| wxgeo/geophar | wxgeometrie/sympy/printing/tableform.py | Python | gpl-2.0 | 11,812 |
#!/usr/bin/python3
import argparse as ap
import shared
ACTIONS = dict()
def action(key):
def wrapper(function):
ACTIONS[key] = function
return function
return wrapper
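# e.g. @action("issues-closed") registers the decorated function in ACTIONS
# under that key, so it can be dispatched via the "what" CLI argument below.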
def get_closed_issues(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
issues_only = [i for i in issues_and_prs if i.pull_request is None]
return issues_only
def get_closed_prs(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
prs_only = [i for i in issues_and_prs if i.pull_request is not None]
return prs_only
@action("issues-closed")
def print_closed_issues(repo, milestone):
for issue in get_closed_issues(repo, milestone):
print(issue.title)
@action("prs-merged")
def print_closed_prs(repo, milestone):
for pr in get_closed_prs(repo, milestone):
print(pr.title)
def create_parser():
parser = ap.ArgumentParser()
parser.add_argument("version", type=shared.version_type)
parser.add_argument("what", choices=(ACTIONS.keys()))
shared.update_parser_with_common_stuff(parser)
return parser
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
gh = shared.get_github(args)
repo = shared.get_repo(gh, "OpenSCAP")
milestone = shared.get_milestone(repo, args.version)
ACTIONS[args.what](repo, milestone)
| mpreisler/openscap | release_tools/query-milestones.py | Python | lgpl-2.1 | 1,404 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras category crossing preprocessing layers."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.experimental.preprocessing.CategoryCrossing')
class CategoryCrossing(base_preprocessing_layer.PreprocessingLayer):
"""Category crossing layer.
This layer concatenates multiple categorical inputs into a single categorical
output (similar to Cartesian product). The output dtype is string.
Usage:
>>> inp_1 = ['a', 'b', 'c']
>>> inp_2 = ['d', 'e', 'f']
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing()
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(3, 1), dtype=string, numpy=
array([[b'a_X_d'],
[b'b_X_e'],
[b'c_X_f']], dtype=object)>
>>> inp_1 = ['a', 'b', 'c']
>>> inp_2 = ['d', 'e', 'f']
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing(
... separator='-')
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(3, 1), dtype=string, numpy=
array([[b'a-d'],
[b'b-e'],
[b'c-f']], dtype=object)>
Args:
depth: depth of input crossing. By default None, all inputs are crossed into
one output. It can also be an int or tuple/list of ints. Passing an
integer will create combinations of crossed outputs with depth up to that
integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will
create crossed outputs with depth for the specified values in the tuple,
i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth
equal to N1 or N2. Passing `None` means a single crossed output with all
inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the
output will be [a; b; c; cross(a, b); cross(b, c); cross(c, a)].
separator: A string added between each input being joined. Defaults to
'_X_'.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: a list of string or int tensors or sparse tensors of shape
`[batch_size, d1, ..., dm]`
Output shape: a single string or int tensor or sparse tensor of shape
`[batch_size, d1, ..., dm]`
Returns:
If any input is `RaggedTensor`, the output is `RaggedTensor`.
Else, if any input is `SparseTensor`, the output is `SparseTensor`.
Otherwise, the output is `Tensor`.
Example: (`depth`=None)
If the layer receives three inputs:
`a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`
the output will be a string tensor:
`[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
Example: (`depth` is an integer)
With the same input above, and if `depth`=2,
the output will be a list of 6 string tensors:
`[[b'1'], [b'4']]`
`[[b'2'], [b'5']]`
`[[b'3'], [b'6']]`
`[[b'1_X_2'], [b'4_X_5']]`,
`[[b'2_X_3'], [b'5_X_6']]`,
`[[b'3_X_1'], [b'6_X_4']]`
Example: (`depth` is a tuple/list of integers)
With the same input above, and if `depth`=(2, 3)
the output will be a list of 4 string tensors:
`[[b'1_X_2'], [b'4_X_5']]`,
`[[b'2_X_3'], [b'5_X_6']]`,
`[[b'3_X_1'], [b'6_X_4']]`,
`[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
"""
def __init__(self, depth=None, name=None, separator='_X_', **kwargs):
super(CategoryCrossing, self).__init__(name=name, **kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell(
'CategoryCrossing').set(True)
self.depth = depth
self.separator = separator
if isinstance(depth, (tuple, list)):
self._depth_tuple = depth
elif depth is not None:
self._depth_tuple = tuple(range(1, depth + 1))
def partial_crossing(self, partial_inputs, ragged_out, sparse_out):
"""Gets the crossed output from a partial list/tuple of inputs."""
# If ragged_out=True, convert output from sparse to ragged.
if ragged_out:
# TODO(momernick): Support separator with ragged_cross.
if self.separator != '_X_':
raise ValueError('Non-default separator with ragged input is not '
'supported yet, given {}'.format(self.separator))
return ragged_array_ops.cross(partial_inputs)
elif sparse_out:
return sparse_ops.sparse_cross(partial_inputs, separator=self.separator)
else:
return sparse_ops.sparse_tensor_to_dense(
sparse_ops.sparse_cross(partial_inputs, separator=self.separator))
def _preprocess_input(self, inp):
if isinstance(inp, (list, tuple, np.ndarray)):
inp = ops.convert_to_tensor_v2_with_dispatch(inp)
if inp.shape.rank == 1:
inp = array_ops.expand_dims(inp, axis=-1)
return inp
def call(self, inputs):
inputs = [self._preprocess_input(inp) for inp in inputs]
depth_tuple = self._depth_tuple if self.depth else (len(inputs),)
ragged_out = sparse_out = False
if any(tf_utils.is_ragged(inp) for inp in inputs):
ragged_out = True
elif any(isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs):
sparse_out = True
outputs = []
for depth in depth_tuple:
if len(inputs) < depth:
raise ValueError(
'Number of inputs cannot be less than depth, got {} input tensors, '
'and depth {}'.format(len(inputs), depth))
for partial_inps in itertools.combinations(inputs, depth):
partial_out = self.partial_crossing(
partial_inps, ragged_out, sparse_out)
outputs.append(partial_out)
if sparse_out:
return sparse_ops.sparse_concat_v2(axis=1, sp_inputs=outputs)
return array_ops.concat(outputs, axis=1)
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)):
raise ValueError('A `CategoryCrossing` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
batch_size = None
for inp_shape in input_shapes:
inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
if len(inp_tensor_shape) != 2:
raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))
if batch_size is None:
batch_size = inp_tensor_shape[0]
# The second dimension is dynamic based on inputs.
output_shape = [batch_size, None]
return tensor_shape.TensorShape(output_shape)
def compute_output_signature(self, input_spec):
input_shapes = [x.shape for x in input_spec]
output_shape = self.compute_output_shape(input_shapes)
if any(
isinstance(inp_spec, ragged_tensor.RaggedTensorSpec)
for inp_spec in input_spec):
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.string)
elif any(
isinstance(inp_spec, sparse_tensor.SparseTensorSpec)
for inp_spec in input_spec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=dtypes.string)
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.string)
def get_config(self):
config = {
'depth': self.depth,
'separator': self.separator,
}
base_config = super(CategoryCrossing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| annarev/tensorflow | tensorflow/python/keras/layers/preprocessing/category_crossing.py | Python | apache-2.0 | 8,543 |
"""
Classes and functions used to visualize data for thermo scientific analyzers
"""
from pandas import Series, DataFrame
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates as d
import os
import math
import glob
import matplotlib
import warnings
import sys
__all__ = ['diurnal_plot','diurnal_plot_single', 'ThermoPlot']
def diurnal_plot(data, dates=[], shaded=False, title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO"):
'''
If plotting the entire DataFrame (data), choose all_data=True, else choose all_data=False
and declare the date or dates to plot as a list. `data` should be a pandas core DataFrame
with time index and each trace gas concentration as a column
returns a single plot for NOx, SO2, and O3
>>> fig, (ax1, ax2, ax3) = diurnal_plot(data, shaded=True)
'''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
print ("data is not a pandas DataFrame, thus this will not end well for you.")
exit
# If length of dates is zero, plot everything
if len(dates) == 0:
# Plot everything, yo!
pass
elif len(dates) == 1:
# Plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# Plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("Dates are not properly configured.")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
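# grouped now has one row per distinct clock time ("00:00", "00:05", ...) and,
# for each gas column, summary statistics (mean, 25%, 75%, ...) across all days.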
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(10,9), sharex=True)
# Set plot titles and labels
ax1.set_title(title, fontsize=14)
ax1.set_ylabel(r'$\ [NO_x] (ppb)$', fontsize=14, weight='bold')
ax2.set_ylabel(r'$\ [SO_2] (ppb)$', fontsize=14)
ax3.set_ylabel(r'$\ [O_3] (ppb)$', fontsize=14)
ax3.set_xlabel(xlabel, fontsize=14)
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False)
plt.setp( ax2.get_xticklabels(), visible=False)
# Set y min to zero just in case:
ax1.set_ylim(0,grouped['nox']['mean'].max()*1.05)
ax2.set_ylim(0,grouped['so2']['mean'].max()*1.05)
ax3.set_ylim(0,grouped['o3']['mean'].max()*1.05)
# Plot means
ax1.plot(grouped.index, grouped['nox']['mean'],'g', linewidth=2.0)
ax2.plot(grouped.index, grouped['so2']['mean'], 'r', linewidth=2.0)
ax3.plot(grouped.index, grouped['o3']['mean'], 'b', linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax1.plot(grouped.index, grouped['nox']['75%'],'g')
ax1.plot(grouped.index, grouped['nox']['25%'],'g')
ax1.set_ylim(0,grouped['nox']['75%'].max()*1.05)
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['75%'], alpha=.5, facecolor='green')
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['25%'], alpha=.5, facecolor='green')
ax2.plot(grouped.index, grouped['so2']['75%'],'r')
ax2.plot(grouped.index, grouped['so2']['25%'],'r')
ax2.set_ylim(0,grouped['so2']['75%'].max()*1.05)
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['75%'], alpha=.5, facecolor='red')
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['25%'], alpha=.5, facecolor='red')
ax3.plot(grouped.index, grouped['o3']['75%'],'b')
ax3.plot(grouped.index, grouped['o3']['25%'],'b')
ax3.set_ylim(0,grouped['o3']['75%'].max()*1.05)
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['75%'], alpha=.5, facecolor='blue')
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['25%'], alpha=.5, facecolor='blue')
# Get/Set xticks
ticks = ax1.get_xticks()
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax3.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, (ax1, ax2, ax3))
def diurnal_plot_single(data, model='', dates=[], shaded=False, color1 = 'blue',
title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO",
ylabel=r'$\ [NO_x] (ppb)$'):
'''
`data` should be a pandas core DataFrame with time index and each trace gas concentration as a column
returns a single plot for one of the three analyzers.
>>> diurnal_plot_single(data, model='o3', ylabel='O3', shaded=True, color1='green')
'''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
sys.exit("data is not a pandas DataFrame, thus this will not end well for you.")
# Check to make sure the model is valid
if model.lower() not in ['nox','so2','o3','sox']:
sys.exit("Model is not defined correctly: options are ['nox','so2','sox','o3']")
# Set model to predefined variable
if model.lower() == 'nox':
instr = 'nox'
elif model.lower() == 'so2' or model.lower() == 'sox':
instr = 'sox'
else:
instr = 'o3'
# If not plotting all the data, truncate the dataframe to include only the needed data
if len(dates) == 0:
# plot everything
pass
elif len(dates) == 1:
# plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("You have an error with how you defined your dates")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, ax = plt.subplots(1, figsize=(8,4))
# Set plot titles and labels
ax.set_title(title, fontsize=14)
ax.set_ylabel(ylabel, fontsize=14, weight='bold')
ax.set_xlabel(xlabel, fontsize=14)
# Set y min to zero just in case:
ax.set_ylim(0,grouped[instr]['mean'].max()*1.05)
# Plot means
ax.plot(grouped.index, grouped[instr]['mean'], color1,linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax.plot(grouped.index, grouped[instr]['75%'],color1)
ax.plot(grouped.index, grouped[instr]['25%'],color1)
ax.set_ylim(0,grouped[instr]['75%'].max()*1.05)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['75%'], alpha=.5, facecolor=color1)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['25%'], alpha=.5, facecolor=color1)
# Get/Set xticks
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, ax)
class ThermoPlot():
'''
Allows for easy plotting of internal instrument data. Currently supports the
following models:
- NO, NO2, NOx (42I)
- O3 (49I)
- SO2 (43I)
'''
def __init__(self, data):
self.data = data
def debug_plot(self, args={}):
'''
Plots thermo scientific instrument data for debugging purposes. The top plot contains internal
instrument data such as flow rates and temperatures. The bottom plot contains trace gas data for the
instrument.
instrument must be set to either nox, so2, sox, or o3
>>> nox = ThermoPlot(data)
>>> f, (a1, a2, a3) = nox.debug_plot()
'''
default_args = {
'xlabel':'Local Time, East St Louis, MO',
'ylabpressure':'Flow (LPM)',
'ylabgas':'Gas Conc. (ppb)',
'ylabtemp':'Temperature (C)',
'title_fontsize':'18',
'labels_fontsize':'14',
'grid':False
}
# Figure out what model we are trying to plot and set instrument specific default args
cols = [i.lower() for i in self.data.columns.values.tolist()]
if 'o3' in cols:
default_args['instrument'] = 'o3'
default_args['title'] = "Debug Plot for " + r'$\ O_{3} $' + ": Model 49I"
default_args['color_o3'] = 'blue'
elif 'sox' in cols or 'so2' in cols:
default_args['instrument'] = 'so2'
default_args['title'] = "Debug Plot for " + r'$\ SO_{2} $' + ": Model 43I"
default_args['color_so2'] = 'green'
elif 'nox' in cols:
default_args['instrument'] = 'nox'
default_args['title'] = "Debug Plot for " + r'$\ NO_{x} $' + ": Model 42I"
default_args['color_no'] = '#FAB923'
default_args['color_nox'] = '#FC5603'
default_args['color_no2'] = '#FAE823'
else:
sys.exit("Could not figure out what isntrument this is for")
# If kwargs are set, replace the default values
for key, val in default_args.iteritems():
if args.has_key(key):
default_args[key] = args[key]
# Set up Plot and all three axes
fig, (ax1, ax3) = plt.subplots(2, figsize=(10,6), sharex=True)
ax2 = ax1.twinx()
# set up axes labels and titles
ax1.set_title(default_args['title'], fontsize=default_args['title_fontsize'])
ax1.set_ylabel(default_args['ylabpressure'], fontsize=default_args['labels_fontsize'])
ax2.set_ylabel(default_args['ylabtemp'], fontsize=default_args['labels_fontsize'])
ax3.set_ylabel(default_args['ylabgas'], fontsize=default_args['labels_fontsize'])
ax3.set_xlabel(default_args['xlabel'], fontsize=default_args['labels_fontsize'])
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False )
# Plot the debug data on the top graph
if default_args['instrument'] == 'o3':
self.data['bncht'].plot(ax=ax2, label=r'$\ T_{bench}$')
self.data['lmpt'].plot(ax=ax2, label=r'$\ T_{lamp}$')
self.data['flowa'].plot(ax=ax1, label=r'$\ Q_{A}$', style='--')
self.data['flowb'].plot(ax=ax1, label=r'$\ Q_{B}$', style='--')
self.data['o3'].plot(ax=ax3, color=default_args['color_o3'], label=r'$\ O_{3}$')
elif default_args['instrument'] == 'so2':
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['smplfl'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['so2'].plot(ax=ax3, label=r'$\ SO_2 $', color=default_args['color_so2'], ylim=[0,self.data['so2'].max()*1.05])
else:
m = max(self.data['convt'].max(),self.data['intt'].max(),self.data['pmtt'].max())
self.data['convt'].plot(ax=ax2, label=r'$\ T_{converter}$')
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['pmtt'].plot(ax=ax2, label=r'$\ T_{PMT}$')
self.data['smplf'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['ozonf'].plot(ax=ax1, label=r'$\ Q_{ozone}$', style='--')
self.data['no'].plot(ax=ax3, label=r'$\ NO $', color=default_args['color_no'])
self.data['no2'].plot(ax=ax3, label=r'$\ NO_{2}$', color=default_args['color_no2'])
self.data['nox'].plot(ax=ax3, label=r'$\ NO_{x}$', color=default_args['color_nox'], ylim=(0,math.ceil(self.data.nox.max()*1.05)))
# Legends
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
plt.legend(lines+lines2, labels+labels2, bbox_to_anchor=(1.10, 1), loc=2, borderaxespad=0.)
ax3.legend(bbox_to_anchor=(1.10, 1.), loc=2, borderaxespad=0.)
# Hide grids?
ax1.grid(default_args['grid'])
ax2.grid(default_args['grid'])
ax3.grid(default_args['grid'])
# More of the things..
plt.tight_layout()
plt.show()
return fig, (ax1, ax2, ax3) | dhhagan/ACT | ACT/thermo/visualize.py | Python | mit | 13,306 |
import random
from common import generalUtils
from common.log import logUtils as log
from constants import clientPackets
from constants import matchModModes
from constants import matchTeamTypes
from constants import matchTeams
from constants import slotStatuses
from objects import glob
def handle(userToken, packetData):
# Read new settings
packetData = clientPackets.changeMatchSettings(packetData)
# Get match ID
matchID = userToken.matchID
# Make sure the match exists
if matchID not in glob.matches.matches:
return
# Host check
with glob.matches.matches[matchID] as match:
if userToken.userID != match.hostUserID:
return
# Some dank memes easter egg
memeTitles = [
"RWC 2020",
"Fokabot is a duck",
"Dank memes",
"1337ms Ping",
"Iscriviti a Xenotoze",
"...e i marò?",
"Superman dies",
"The brace is on fire",
"print_foot()",
"#FREEZEBARKEZ",
"Ripple devs are actually cats",
"Thank Mr Shaural",
"NEVER GIVE UP",
"T I E D W I T H U N I T E D",
"HIGHEST HDHR LOBBY OF ALL TIME",
"This is gasoline and I set myself on fire",
"Everyone is cheating apparently",
"Kurwa mac",
"TATOE",
"This is not your drama landfill.",
"I like cheese",
"NYO IS NOT A CAT HE IS A DO(N)G",
"Datingu startuato"
]
# Set match name
match.matchName = packetData["matchName"] if packetData["matchName"] != "meme" else random.choice(memeTitles)
# Update match settings
match.inProgress = packetData["inProgress"]
if packetData["matchPassword"] != "":
match.matchPassword = generalUtils.stringMd5(packetData["matchPassword"])
else:
match.matchPassword = ""
match.beatmapName = packetData["beatmapName"]
match.beatmapID = packetData["beatmapID"]
match.hostUserID = packetData["hostUserID"]
match.gameMode = packetData["gameMode"]
oldBeatmapMD5 = match.beatmapMD5
oldMods = match.mods
oldMatchTeamType = match.matchTeamType
match.mods = packetData["mods"]
match.beatmapMD5 = packetData["beatmapMD5"]
match.matchScoringType = packetData["scoringType"]
match.matchTeamType = packetData["teamType"]
match.matchModMode = packetData["freeMods"]
# Reset ready if needed
if oldMods != match.mods or oldBeatmapMD5 != match.beatmapMD5:
match.resetReady()
# Reset mods if needed
if match.matchModMode == matchModModes.NORMAL:
# Reset slot mods if not freeMods
match.resetMods()
else:
# Reset match mods if freemod
match.mods = 0
# Initialize teams if team type changed
if match.matchTeamType != oldMatchTeamType:
match.initializeTeams()
# Force no freemods if tag coop
if match.matchTeamType in (matchTeamTypes.TAG_COOP, matchTeamTypes.TAG_TEAM_VS):
match.matchModMode = matchModModes.NORMAL
# Send updated settings
match.sendUpdates()
# Console output
log.info("MPROOM{}: Updated room settings".format(match.matchID))
| osuripple/pep.py | events/changeMatchSettingsEvent.py | Python | agpl-3.0 | 2,905 |
"""
Easily manage dotfiles through the command line.
"""
from setuptools import find_packages, setup
dependencies = ['click']
setup(
name='dot-cli',
version='0.0.5',
url='https://github.com/kylefrost/dot',
download_url='https://github.com/kylefrost/dot-cli/tarball/0.0.5',
author='Kyle Frost',
author_email='[email protected]',
description='Lightweight tool for managing dotfiles with git and the command line.',
license='GPLv3',
long_description=__doc__,
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
entry_points={
'console_scripts': [
'dot = dot.cli:main',
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| kylefrost/dot-cli | setup.py | Python | gpl-3.0 | 1,706 |
import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANFILE, CONANINFO, BUILD_INFO_CMAKE
import os
from conans.model.info import ConanInfo
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.util.files import mkdir, load
class InstallSubfolderTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
self.settings = ("-s os=Windows -s compiler='Visual Studio' -s compiler.version=12 "
"-s arch=x86 -s compiler.runtime=MD")
def _create(self, number, version, deps=None, export=True):
files = cpp_hello_conan_files(number, version, deps, build=False)
files[CONANFILE] = files[CONANFILE] + """ def build(self):
self.output.info("Settings %s" % self.settings.values.dumps())
self.output.info("Options %s" % self.options.values.dumps())
"""
self.client.save(files, clean_first=True)
if export:
self.client.run("export . lasote/stable")
def reuse_test(self):
self._create("Hello0", "0.1")
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"])
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False)
current_folder = self.client.current_folder
h00 = "2e38bbc2c3ef1425197c8e2ffa8532894c347d26"
h10 = "44671ecdd9c606eb7166f2197ab50be8d36a3c3b"
h01 = "8b964e421a5b7e48b7bc19b94782672be126be8b"
h11 = "3eeab577a3134fa3afdcd82881751789ec48e08f"
for lang, id0, id1, id2, id3 in [(0, h00, h10, h01, h11),
(1, h01, h11, h00, h10)]:
self.client.current_folder = os.path.join(current_folder, "lang%dbuild" % lang)
mkdir(self.client.current_folder)
self.client.run("install .. -o language=%d %s --build missing" % (lang, self.settings))
info_path = os.path.join(self.client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("arch=x86\n"
"compiler=Visual Studio\n"
"compiler.runtime=MD\n"
"compiler.version=12\n"
"os=Windows",
conan_info.settings.dumps())
conan_info_text = load(info_path)
self.assertIn(id0, conan_info_text)
self.assertIn(id1, conan_info_text)
self.assertNotIn(id2, conan_info_text)
self.assertNotIn(id3, conan_info_text)
self.assertEqual("language=%s\nstatic=True" % lang, conan_info.options.dumps())
build_cmake = os.path.join(self.client.current_folder, BUILD_INFO_CMAKE)
build_cmake_text = load(build_cmake)
self.assertIn(id0, build_cmake_text)
self.assertIn(id1, build_cmake_text)
self.assertNotIn(id2, build_cmake_text)
self.assertNotIn(id3, build_cmake_text)
# Now test "build" command in subfolders
for lang, id0, id1, id2, id3 in [(0, h00, h10, h01, h11),
(1, h01, h11, h00, h10)]:
self.client.current_folder = os.path.join(current_folder, "lang%dbuild" % lang)
self.client.run("build ..")
self.assertIn("compiler=Visual Studio", self.client.out)
self.assertIn("language=%d" % lang, self.client.out)
self.assertNotIn("language=%d" % (not lang), self.client.out)
| birsoyo/conan | conans/test/command/install_subfolder_test.py | Python | mit | 3,532 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'intbarras.ui'
#
# Created: Tue Feb 23 20:28:14 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
"""
Prueba para insertar la prueba de gráfico de barras con episodio de sueño
utilizando Qt designer
Código de interfaz compilado mediante el comando:
pyuic4 -x intbarras.ui -o pruebainterfazbar.py
"""
from PyQt4 import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(480, 481)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(270, 360, 98, 27))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(130, 360, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.graphicsView = GraphicsView(self.centralwidget)
self.graphicsView.setGeometry(QtCore.QRect(100, 60, 256, 192))
self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 480, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pushButton.setText(_translate("MainWindow", "PushButton", None))
self.pushButton_2.setText(_translate("MainWindow", "PushButton", None))
from pyqtgraph import GraphicsView
# Load the data
csv = np.genfromtxt ('data.csv', delimiter=",")
x = csv[:,1]
y = csv[:,25]
# Split the data into episodes
a = -1
b = -1
c = 0
for i in range(len(y)):
    if(y[i] != 0 and a == -1): # asleep
        a = i # start of the episode
    elif(y[i] == 0 and a != -1 and b < 100): # awake
b = b + 1
elif(b >= 100 and c == 0):
c = 1
        b = i # end of the episode
k = i
print "a:%i b:%i k:%i\n" %(a, b, k)
# Choose the episode to display
n = b-a
x = x[a:b]
y = y[a:b]
xGrid = np.linspace(x[0], x[-1], n)
yGrid = np.interp(xGrid, x, y)
colors = []
num = 0
for i in y:
    if(i == 2): # Light sleep - orange
        c = pg.mkColor(255, 128, 0)
    elif(i == 4): # Deep sleep - yellow
        c = pg.mkColor(255, 255, 0)
    elif(i == 5): # Very deep sleep - green
        c = pg.mkColor(0, 255, 0)
        #print "Green! at %i\n" % num
    else: # Awake - red
        c = pg.mkColor(255, 0, 0)
colors.append(c)
num = num + 1
barGraphItem = pg.BarGraphItem()
barGraphItem.setOpts(x0=xGrid[:-1], x1=xGrid[1:], y0=[0] * n, height=0.3, brushes=colors, pens=colors)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
plt = pg.PlotItem()
plt.getViewBox().setMouseEnabled(True, False)
ui.graphicsView.setCentralItem(plt)
plt.addItem(barGraphItem)
MainWindow.show()
sys.exit(app.exec_())
| acrsilva/animated-zZz-machine | pruebas/pruebainterfazbar.py | Python | lgpl-3.0 | 4,071 |
# Usage:
# - python this_script k path_to_image input_file
#
# Purpose: This script displays arrows between test points and their kNN estimates.
#
# We use this utility to visualize the accuracy of our kNN algorithm
# on the test points we have mapped.
from sys import argv, stderr, exit
from PIL import Image, ImageTk
import Tkinter
import json
import os
import math
import sys
APP = {} # contains global information needed by tkinter functions
REAL_POINTS = []
GUESS_POINTS = []
NEIGHBORS = []
NUM_NEIGHBORS = 4
INDEX = 0
class Point(object):
""" Point Object """
def __init__(self, x, y, density=""):
self.x = x
self.y = y
self.density=density
class Line(object):
""" Line Object """
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.length = distance(p1, p2)
#--------------------------------
# Misc Utility Functions Below
#--------------------------------
def distance(a, b):
""" Returns the distance between the given two Points """
dx = a.x - b.x
dy = a.y - b.y
return math.sqrt(dx * dx + dy * dy)
#--------------------------------
# TKinter Application Code Below
#--------------------------------
def initializeApp(image_path):
""" Initializes data for app Binds tkinter buttons """
global APP
image = Image.open(image_path)
width, height = image.size[0], image.size[1]
APP['window'] = Tkinter.Tk()
APP['frame'] = Tkinter.Frame(APP['window'])
image_tk = ImageTk.PhotoImage(image)
APP['canvas'] = Tkinter.Canvas(APP['frame'], width=width, height=height-50)
APP['canvas'].create_image(width // 2, height // 2, image=image_tk)
    APP['dims'] = {'w': width, 'h': height}
APP['buttons'] = getButtons()
APP['points'] = []
APP['lines'] = []
APP['canvas_list'] = []
APP['frame'].pack()
APP['canvas'].pack()
APP['buttons']['ready_btn'].pack(side='right')
APP['window'].mainloop()
def getButtons():
""" Returns dict of buttons; will be added to app object"""
buttons = {'ready_btn': Tkinter.Button(APP['frame'], text="Begin", command=ready)}
return buttons
def draw_point(p, color, text=""):
""" draws a point at the coordinates with the specified color """
global APP
radius = 5 # point radius
new_canvas = APP['canvas'].create_oval(
p.x - radius, p.y - radius, p.x + radius, p.y + radius, fill=color)
if text != "":
APP['canvas_list'].append(new_canvas)
new_canvas = APP['canvas'].create_text(
p.x, p.y-15, text=str(text))
APP['points'].append(p)
APP['canvas_list'].append(new_canvas)
def draw_line(line, color):
""" draws the given line with the specified color """
global APP
new_canvas = APP['canvas'].create_line(
line.p1.x, line.p1.y, line.p2.x, line.p2.y, fill=color,
width=1, arrow=Tkinter.FIRST)
APP['lines'].append(line)
APP['canvas_list'].append(new_canvas)
def ready():
""" Displays connections between test points and predictions """
global REAL_POINTS, GUESS_POINTS, NEIGHBORS, INDEX
if INDEX == 0:
global APP
readPoints()
APP['buttons']['ready_btn']["text"] = "Next point"
elif INDEX == len(REAL_POINTS):
sys.exit(0)
else:
global APP
for canvas in APP['canvas_list']:
APP['canvas'].delete(canvas)
APP['points'] = []
APP['canvas_list'] = []
draw_point(REAL_POINTS[INDEX], 'green', "P" + str(INDEX))
draw_point(GUESS_POINTS[INDEX], 'red')
draw_line(Line(REAL_POINTS[INDEX], GUESS_POINTS[INDEX]), 'blue')
for j in range(INDEX * NUM_NEIGHBORS, INDEX * NUM_NEIGHBORS + NUM_NEIGHBORS):
draw_point(NEIGHBORS[j], 'purple', str(j - INDEX * NUM_NEIGHBORS + 1))
draw_line(Line(REAL_POINTS[INDEX], NEIGHBORS[j]), 'black')
INDEX = INDEX + 1
def readPoints():
    """ Reads points from input file """
    global REAL_POINTS, GUESS_POINTS, NEIGHBORS
REAL_POINTS = []
GUESS_POINTS = []
NEIGHBORS = []
points_list = sys.stdin.readlines()
for (index, line) in enumerate(points_list):
points = [float(p) for p in line.rstrip().split()]
if len(points) == 4:
REAL_POINTS.append(Point(points[0], points[1]))
GUESS_POINTS.append(Point(points[2], points[3]))
else:
NEIGHBORS.append(Point(points[0], points[1]))
def main(argv):
if len(argv) != 4:
print "Usage: python testresults.py k image_path point_coords"
exit(1)
global NUM_NEIGHBORS
NUM_NEIGHBORS = int(argv[1])
image_path = argv[2]
sys.stdin = open(argv[3])
initializeApp(image_path)
if __name__ == '__main__':
main(argv)
| TeamSirius/Utilities | scripts/showneighbors.py | Python | apache-2.0 | 4,742 |
#r# =======================
#r# How to Use SubCircuit
#r# =======================
#r# This example shows how to use subcircuits.
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Spice.Netlist import Circuit, SubCircuit, SubCircuitFactory
from PySpice.Unit import *
####################################################################################################
#r# There is two ways to define subcircuit with PySpice, either using
#r# :class:`PySpice.Spice.Netlist.SubCircuit` or a simpler alternative
#r# :class:`PySpice.Spice.Netlist.SubCircuitFactory`.
#r#
#r# Let define a parallel resistor subcircuit using the :class:`PySpice.Spice.Netlist.SubCircuitFactory`
class ParallelResistor(SubCircuitFactory):
NAME = 'parallel_resistor'
NODES = ('n1', 'n2')
def __init__(self, R1=1@u_Ω, R2=2@u_Ω):
super().__init__()
self.R(1, 'n1', 'n2', R1)
self.R(2, 'n1', 'n2', R2)
#r# Let define a circuit
circuit = Circuit('Test')
#r# then we can use this subcircuit like this
circuit.subcircuit(ParallelResistor(R2=3@u_Ω))
circuit.X('1', 'parallel_resistor', 1, circuit.gnd)
print(circuit)
#o#
#r# If the above way is not suited for your purpose we can use this second approach
class ParallelResistor2(SubCircuit):
NODES = ('n1', 'n2')
def __init__(self, name, R1=1@u_Ω, R2=2@u_Ω):
SubCircuit.__init__(self, name, *self.NODES)
self.R(1, 'n1', 'n2', R1)
self.R(2, 'n1', 'n2', R2)
circuit = Circuit('Test')
circuit.subcircuit(ParallelResistor2('pr1', R2=2@u_Ω))
circuit.X('1', 'pr1', 1, circuit.gnd)
circuit.subcircuit(ParallelResistor2('pr2', R2=3@u_Ω))
circuit.X('2', 'pr2', 1, circuit.gnd)
print(circuit)
#o#
| FabriceSalvaire/PySpice | examples/basic-usages/subcircuit.py | Python | gpl-3.0 | 1,938 |
import copy
from os.path import join, expanduser
from drivelink import Dict
class _Cached:
"This will shine the most with recursive functions. But the recursion has to call the cached function, not the function itself."
f = None
c = None
def __init__(self, function, file_basename=None, size_limit=1024, max_pages=16, file_location=join(expanduser("~"), ".DriveLink"), compression_ratio=0):
for n in list(n for n in set(dir(function)) - set(dir(self)) if n != '__class__'):
setattr(self, n, getattr(function, n))
if file_basename is None:
file_basename = function.__name__
self.f = function
self.c = Dict(file_basename, size_limit, max_pages, file_location, compression_ratio)
def __call__(self, *args, **kwargs):
i = str(args) + str(kwargs)
if i in self.c:
return copy.deepcopy(self.c[i])
else:
t = self.f(*args, **kwargs)
self.c[i] = copy.deepcopy(t)
return t
def cached(file_basename=None, size_limit=1024, max_pages=16, file_location=join(expanduser("~"), ".DriveLink")):
'''
A decorator that creates a simplistic cached function with minimal overhead.
    This provides a very simple and quick cache. The values are saved to a drivelink.Dict
    and will be reloaded when the program restarts.
'''
def decorator(f):
return _Cached(f, file_basename, size_limit, max_pages, file_location)
return decorator
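# A minimal usage sketch (illustrative only; ``fib`` and the basename are made
# up). The recursive call goes through the decorated name, so intermediate
# results hit the cache, as the _Cached docstring above recommends.
if __name__ == "__main__":
    @cached(file_basename="fib_demo")
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    print(fib(30))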
| cdusold/DriveLink | drivelink/_diskmemoize.py | Python | mit | 1,491 |
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(ext_modules=cythonize("rollover_c.pyx"),
include_dirs=[numpy.get_include()])
# python rollover_c_setup.py build_ext --inplace
| energyPATHWAYS/energyPATHWAYS | energyPATHWAYS/_obsolete/rollover_c_setup.py | Python | mit | 221 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
paddle.enable_static()
# Correct: General.
class TestSqueezeOp(OpTest):
def setUp(self):
self.op_type = "squeeze2"
self.init_test_case()
self.inputs = {"X": np.random.random(self.ori_shape).astype("float64")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
"XShape": np.random.random(self.ori_shape).astype("float64")
}
def test_check_output(self):
self.check_output(no_check_set=['XShape'])
def test_check_grad(self):
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
self.axes = (0, 2)
self.new_shape = (3, 40)
def init_attrs(self):
self.attrs = {"axes": self.axes}
# Correct: There is a minus axis.
class TestSqueezeOp1(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = (0, -2)
self.new_shape = (20, 5)
# Correct: No axes input.
class TestSqueezeOp2(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (1, 20, 1, 5)
self.axes = ()
self.new_shape = (20, 5)
# Correct: Just part of the axes are squeezed.
class TestSqueezeOp3(TestSqueezeOp):
def init_test_case(self):
self.ori_shape = (6, 1, 5, 1, 4, 1)
self.axes = (1, -1)
self.new_shape = (6, 5, 1, 4)
if __name__ == "__main__":
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_squeeze2_op.py | Python | apache-2.0 | 2,178 |
from tests.helper import TestHelper, TestHelperException
def run(data):
helper = TestHelper(data)
try:
value = 1
for t in range(21):
helper.is_io_correct(t, 0, value, 0)
if value: value = 0
else: value = 1
except TestHelperException as e:
print(e.msg)
return TestHelper.FAIL_CODE
return TestHelper.SUCCESS_CODE
| charlesvdv/arduinint | tests/test_delai/test.py | Python | mit | 397 |
from django_cassandra_engine.utils import get_cassandra_connections
from .compat import (
Cluster,
CQLEngineException,
PlainTextAuthProvider,
Session,
connection,
)
class Cursor(object):
def __init__(self, connection):
self.connection = connection
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def close(self):
pass
def fetchmany(self, _):
return []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class FakeConnection(object):
def commit(self):
pass
def rollback(self):
pass
def cursor(self):
return Cursor(None)
def close(self):
pass
class CassandraConnection(object):
def __init__(self, alias, **options):
self.alias = alias
self.hosts = options.get("HOST").split(",")
self.keyspace = options.get("NAME")
self.user = options.get("USER")
self.password = options.get("PASSWORD")
self.options = options.get("OPTIONS", {})
self.cluster_options = self.options.get("connection", {})
self.session_options = self.options.get("session", {})
self.connection_options = {
"lazy_connect": self.cluster_options.pop("lazy_connect", False),
"retry_connect": self.cluster_options.pop("retry_connect", False),
"consistency": self.cluster_options.pop("consistency", None),
}
if self.user and self.password and "auth_provider" not in self.cluster_options:
self.cluster_options["auth_provider"] = PlainTextAuthProvider(
username=self.user, password=self.password
)
self.default = (
alias == "default"
or len(list(get_cassandra_connections())) == 1
or self.cluster_options.pop("default", False)
)
self.register()
def register(self):
try:
connection.get_connection(name=self.alias)
except CQLEngineException:
if self.default:
from cassandra.cqlengine import models
models.DEFAULT_KEYSPACE = self.keyspace
for option, value in self.session_options.items():
setattr(Session, option, value)
if "cloud" in self.cluster_options:
cluster = Cluster(**self.cluster_options)
session = cluster.connect()
connection.register_connection(
self.alias, default=self.default, session=session
)
else:
connection.register_connection(
self.alias,
hosts=self.hosts,
default=self.default,
cluster_options=self.cluster_options,
**self.connection_options
)
@property
def cluster(self):
return connection.get_cluster(connection=self.alias)
@property
def session(self):
return connection.get_session(connection=self.alias)
def commit(self):
pass
def rollback(self):
pass
def cursor(self):
return Cursor(self)
def execute(self, qs, *args, **kwargs):
self.session.set_keyspace(self.keyspace)
return self.session.execute(qs, *args, **kwargs)
def close(self):
"""We would like to keep connection always open by default"""
def unregister(self):
"""Unregister this connection"""
connection.unregister_connection(self.alias)
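# A minimal usage sketch (values are illustrative); the option keys mirror the
# ones read in CassandraConnection.__init__ above:
#
#     conn = CassandraConnection(
#         'default',
#         HOST='127.0.0.1,127.0.0.2',
#         NAME='my_keyspace',
#         USER='cassandra',
#         PASSWORD='cassandra',
#         OPTIONS={'connection': {'lazy_connect': True},
#                  'session': {'default_timeout': 15}},
#     )
#     rows = conn.execute("SELECT now() FROM system.local")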
| r4fek/django-cassandra-engine | django_cassandra_engine/connection.py | Python | bsd-2-clause | 3,613 |
from loadimage import loadImage
from loadlabel import loadGroundTruth
from labelreshape import reshapeLables
from loadpickle import loadPickleData
from loadcsvlabel import loadCSV
| wuga214/Boundary-Detection-via-Convolution-Deconvolution-Neural-Network-with-BMA | Conv-Deconv-Image-Process/files/__init__.py | Python | mit | 179 |
import pymel.all as pm
import random
import math
import traceback
helpString="""
Actual python code can be evaluated for the new value.
i is the current iteration
r is python's random module
m is python's math module
pm is pymel
for example you could enter "translate" as the attribute to change
and use the following code to change the attributes on many objects.
Try it on a bunch of default cubes. (perhaps 20 cubes?)
( r(-0.3,0.3), i, m.sin(i)/3 )
"""
class RiggerAttributeSetter(object):
def __init__(self, showUi=False, go=True, attribute='', value=''):
if showUi:
self.ui = RiggerAttributeSetterUi(parent=self)
else:
if go==True:
self.setAttributes( attribute, value )
def setAttributes(self, attribute, value):
## Get a list of all selected objects
objs = pm.ls(selection=True)
attribute = str(attribute)
## Loop through the list, and try setting each object
for i,obj in enumerate(objs):
## Get the attribute based on the attribute name in the UI
try:
attr = getattr( obj, attribute )
a = attr.get()
r = random.uniform
m = math
#i = i ## just a reminder! i is useful
## Get the value from the UI
val = eval( value )
                ## Set the attribute to the evaluated value
attr.set( val )
except:
                print('Failed for object ' + obj.name() )
print( traceback.format_exc() )
class RiggerAttributeSetterUi(object):
def __init__(self, parent):
self.parent = parent
self.win = pm.window('Attribute Setter', resizeToFitChildren=True)
with self.win:
self.col = pm.columnLayout()
with self.col:
self.helpButton = pm.button(
label="Show Help",
command=lambda x: self.showHelp(),
)
## Text label that says "Attribute to change:"
self.attributeLabel = pm.text( 'Attribute to change:' )
## Text entry field, a place where the user will type in the attribute to change
self.attributeField = pm.textField( width=600 )
## Text label that says "New value for attribute:"
self.valueLabel = pm.text( 'New value for attribute:' )
## Text entry field, a place where the user will type the new value to set the attribute to
self.valueField = pm.textField( width=600 )
self.go = pm.button(
label="Set Attributes",
command=lambda x: self.parent.setAttributes(
attribute=self.attributeField.getText(),
value=self.valueField.getText(),
)
)
def showHelp(self):
helpWindow = pm.window('Attribute Setter Help', resizeToFitChildren=True)
with helpWindow:
helpCol = pm.columnLayout()
with helpCol:
helpText = pm.text( helpString )
        helpWindow.show()
| joetainment/mmmmtools | MmmmToolsMod/Dynamic/RiggerAttributeSetter.py | Python | gpl-3.0 | 3,023 |
from . import voice
""" Style Guide: When writing dialogue, try to follow the following rules.
- Do not use contractions.
    DRACONIAN: Reptiles hiss. And they never want something when they can desire it.
DWARVEN: Dwarves use a lot of kennings in their speech. Think "The Mighty
Thor".
ELVEN: Lacks words with negative meanings, like "evil" or "ugly". Instead
use the negation of a positive word- "ungood", "unlovely".
GNOMIC: Like dwarves, but cheeky.
GREEK: This pretty much only exists so that Centaurs, Titans, and other
beings from Greek mythology can use the Green name generator. Maybe try to
use as many loanwords as possible?
HURTHISH: It's all about the food.
KITTEH: I can haz lolspeak? Should have a big smart/stupid difference- the
smart speakers get grammar right, just use Kitteh vocabulary. Also, try and
stick to lolspeak which can be spoken aloud + isn't perfectly homophonic.
ORCISH: This language is more efficient than regular English because it gets
rid of trivialities like verb conjugation, a bunch of pronouns, and the "th"
sound.
STUPID: Uses slang that your English teacher would disapprove of and
otherwise mangles the language.
SMART: Why use a four letter word when there's a twelve letter alternative?
The personalizer determines how things are said. It contains options based
on the language/nature of the speaker, and is unconcerned with the greater
context.
"""
class PTEntry( object ):
def __init__( self, subtext, requires = {} ):
self.subtext = subtext
self.requires = requires
def fits_voice( self, npc_voice ):
ok = True
for k,v in self.requires.items():
if v:
# This key is required.
ok = ok and ( k in npc_voice )
else:
# This key is prohibited.
ok = ok and ( k not in npc_voice )
return ok
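# For example (illustrative values only): an entry that requires ORCISH and
# prohibits SMART matches a voice that contains ORCISH but not SMART.
#
#     entry = PTEntry( "stabba", { voice.ORCISH: True, voice.SMART: False } )
#     entry.fits_voice( [voice.ORCISH] )                # -> True
#     entry.fits_voice( [voice.ORCISH, voice.SMART] )   # -> False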
PT_DATABASE = {
"a lot": (
PTEntry( "lotz", { voice.KITTEH: True } ),
PTEntry( "plenty" ),
PTEntry( "tons", { voice.HURTHISH: True } ),
),
"am looking for": (
PTEntry( "seek", { voice.DRACONIAN: True } ),
PTEntry( "iz lookin' fer", { voice.KITTEH: True } ),
),
"anything": (
PTEntry( "stuff", { voice.STUPID: True } ),
PTEntry( "anythin'", { voice.KITTEH: True } ),
),
"any help": (
PTEntry( "some assistance", { voice.DRACONIAN: True } ),
PTEntry( "teh helps", { voice.KITTEH: True } ),
),
"Are you alright ?": (
PTEntry( "Is everything alright?" ),
PTEntry( "Is everything okay?" ),
PTEntry( "Are you okay?" ),
PTEntry( "Iz you okays?", { voice.KITTEH: True } ),
PTEntry( "I can smell your pain.", { voice.DRACONIAN: True } ),
PTEntry( "Wot's da matta?", { voice.ORCISH: True } ),
),
"ate": (
PTEntry( "eated", { voice.KITTEH: True, voice.SMART: False } ),
PTEntry( "devoured", { voice.SMART: True } ),
PTEntry( "consumed", { voice.DRACONIAN: True } ),
),
"axe": (
PTEntry( "choppa", { voice.ORCISH: True } ),
PTEntry( "foe-cleaver", { voice.DWARVEN: True } ),
),
"bow": (
PTEntry( "arrow-flinga", { voice.ORCISH: True } ),
),
"buy": (
PTEntry( "purchase", { voice.SMART: True } ),
PTEntry( "buyz", { voice.KITTEH: True } ),
PTEntry( "acquire", { voice.GNOMIC: True, voice.STUPID: False } ),
PTEntry( "get", { voice.STUPID: True } ),
),
"buy something": (
PTEntry( "trade" ),
PTEntry( "get a thing", { voice.STUPID: True } ),
PTEntry( "make a purchase", { voice.SMART: True } ),
PTEntry( "lay down some coin", { voice.DWARVEN: True } ),
PTEntry( "barter", { voice.DWARVEN: True } ),
PTEntry( "haggle", { voice.GNOMIC: True } ),
PTEntry( "barter", { voice.GNOMIC: True } ),
PTEntry( "exchange treasures", { voice.DRACONIAN: True } ),
PTEntry( "buy sum stuff", { voice.KITTEH: True } ),
),
"Can I have": (
PTEntry( "I can haz", { voice.KITTEH: True, voice.SMART: False } ),
),
"Can I help you": (
PTEntry( "Do you need help" ),
PTEntry( "Shall I assist you", { voice.DRACONIAN: True } ),
),
"cat": (
PTEntry( "kitteh", { voice.KITTEH: True } ),
),
"come back tomorrow": (
PTEntry( "come back later" ),
PTEntry( "come back next year", { voice.GNOMIC: True } ),
PTEntry( "better luck next time", { voice.GNOMIC: True } ),
),
'different': (
PTEntry( "divergent", { voice.SMART: True } ),
PTEntry( "not the same", { voice.STUPID: True } ),
PTEntry( "dissimilar", { voice.ELVEN: True } ),
PTEntry( "difrunt", { voice.KITTEH: True } ),
),
"did not": (
PTEntry( "didn't" ),
),
"Do you have": (
PTEntry( "You got", { voice.ORCISH: True } ),
PTEntry( "You haz got", { voice.KITTEH: True, voice.SMART: False } ),
),
"do you want": (
PTEntry( "may you be wanting", { voice.HURTHISH: True } ),
),
"enjoy": (
PTEntry( "savor", { voice.DRACONIAN: True } ),
),
"eternity": (
PTEntry( "eternitys", { voice.KITTEH: True } ),
PTEntry( "all-tomorrows", { voice.DWARVEN: True } ),
),
'exactly': (
PTEntry( "precisely", { voice.SMART: True } ),
PTEntry( "perfectly", { voice.ELVEN: True } ),
PTEntry( "right", { voice.STUPID: True } ),
PTEntry( "egsaktly", { voice.KITTEH: True } ),
PTEntry( "preshishley", { voice.KITTEH: True, voice.SMART: True } ),
PTEntry( "suitably", { voice.HURTHISH: True } ),
),
"evil": (
PTEntry( "ungood", { voice.ELVEN: True } ),
),
"forever": (
PTEntry( "furevur", { voice.KITTEH: True } ),
PTEntry( "til never", { voice.GNOMIC: True } ),
),
"have": (
PTEntry( "'ave", { voice.ORCISH: True } ),
PTEntry( "haves", { voice.DRACONIAN: True, voice.STUPID: True } ),
PTEntry( "haz", { voice.KITTEH: True } ),
),
"have .": (
PTEntry( "possess.", { voice.DRACONIAN: True } ),
PTEntry( "haz.", { voice.KITTEH: True } ),
),
"He": (
PTEntry( "Him", { voice.KITTEH: True, voice.STUPID: True } ),
),
"he": (
PTEntry( "him", { voice.KITTEH: True, voice.STUPID: True } ),
),
"Hello": (
PTEntry( "'Ello, ello", { voice.ORCISH: True } ),
PTEntry( "'Ello", { voice.ORCISH: True, voice.STUPID: False } ),
PTEntry( "Greetings", { voice.SMART: True } ),
PTEntry( "Hey", { voice.SMART: False, voice.DWARVEN: False, voice.ORCISH: False, voice.DRACONIAN: False } ),
PTEntry( "Salutations", { voice.DRACONIAN: True, voice.STUPID: False } ),
PTEntry( "Sss", { voice.DRACONIAN: True, voice.STUPID: True } ),
PTEntry( "Hi", { voice.STUPID: False, voice.DWARVEN: False, voice.ORCISH: False, voice.DRACONIAN: False } ),
PTEntry( "Lali-ho", { voice.DWARVEN: True } ),
PTEntry( "Hai", { voice.KITTEH: True } ),
PTEntry( "Hullo", { voice.HURTHISH: True } ),
),
"Hello ,": (
PTEntry( "'Ello there,", { voice.ORCISH: True } ),
PTEntry( "Greetingss,", { voice.DRACONIAN: True } ),
PTEntry( "Lali-ho,", { voice.DWARVEN: True } ),
PTEntry( "Oh hai there,", { voice.KITTEH: True } ),
PTEntry( "Hullo,", { voice.HURTHISH: True } ),
),
"here": (
PTEntry( "'ere", { voice.ORCISH: True } ),
),
"hers": (
PTEntry( "her", { voice.KITTEH: True, voice.STUPID: True } ),
PTEntry( "herz", { voice.KITTEH: True } ),
),
"His": (
PTEntry( "Him", { voice.KITTEH: True, voice.STUPID: True } ),
),
"his": (
PTEntry( "him", { voice.KITTEH: True, voice.STUPID: True } ),
),
"I am": (
PTEntry( "I'm" ),
PTEntry( "I'z", { voice.KITTEH: True } ),
),
"I would": (
PTEntry( "I'd" ),
),
"information": (
PTEntry( "stuff", { voice.STUPID: True } ),
PTEntry( "knowledge", { voice.SMART: True } ),
PTEntry( "enlightenment", { voice.ELVEN: True, voice.STUPID: False } ),
PTEntry( "know-wotz", { voice.ORCISH: True } ),
PTEntry( "secrets", { voice.DRACONIAN: True } ),
PTEntry( "hard facts", { voice.DWARVEN: True } ),
PTEntry( "informashun", { voice.KITTEH: True } ),
PTEntry( "gnosis", { voice.GREEK: True } ),
),
"in stock .": (
PTEntry( "right here." ),
),
"is not": (
PTEntry( "isn't" ),
PTEntry( "ain't", { voice.STUPID: True } ),
PTEntry( "izn't", { voice.KITTEH: True } ),
),
"It is": (
PTEntry( "It's" ),
PTEntry( "It'z", { voice.KITTEH: True } ),
),
"Knowledge": (
PTEntry( "Smarts", { voice.STUPID: True } ),
PTEntry( "Information", { voice.SMART: True } ),
PTEntry( "Enlightenment", { voice.ELVEN: True, voice.STUPID: False } ),
PTEntry( "Know-wotz", { voice.ORCISH: True } ),
PTEntry( "Know-how", { voice.DWARVEN: True } ),
PTEntry( "Informashun", { voice.KITTEH: True } ),
PTEntry( "Gnosis", { voice.GREEK: True } ),
PTEntry( "Book learning", { voice.HURTHISH: True } ),
),
"knowledge": (
PTEntry( "smarts", { voice.STUPID: True } ),
PTEntry( "information", { voice.SMART: True } ),
PTEntry( "enlightenment", { voice.ELVEN: True, voice.STUPID: False } ),
PTEntry( "know-wotz", { voice.ORCISH: True } ),
PTEntry( "know-how", { voice.DWARVEN: True } ),
PTEntry( "informashun", { voice.KITTEH: True } ),
PTEntry( "gnosis", { voice.GREEK: True } ),
PTEntry( "book learning", { voice.HURTHISH: True } ),
),
"leader": (
PTEntry( "archon", { voice.GREEK: True } ),
PTEntry( "boss", { voice.ORCISH: True } ),
PTEntry( "bigwig", { voice.KITTEH: True } ),
),
"Let me": (
PTEntry( "Lemme", { voice.STUPID: True } ),
PTEntry( "Allow me to", { voice.SMART: True } ),
PTEntry( "I can", { voice.KITTEH: True } ),
),
"Let me know": (
PTEntry( "Tell me" ),
PTEntry( "Spit it out", { voice.ORCISH: True } ),
PTEntry( "Holler", { voice.HURTHISH: True } ),
),
"let me know": (
PTEntry( "tell me" ),
PTEntry( "inform me", { voice.SMART: True } ),
PTEntry( "just holler", { voice.HURTHISH: True } ),
),
"library": (
PTEntry( "book place", { voice.STUPID: True } ),
PTEntry( "lyceum", { voice.GREEK: True } ),
),
"little": (
PTEntry( "littul", { voice.KITTEH: True } ),
PTEntry( "wee", { voice.KITTEH: True, voice.STUPID: False } ),
),
"Looking for": (
PTEntry( "Lookin' for", { voice.ORCISH: True } ),
PTEntry( "Seeking", { voice.DRACONIAN: True } ),
PTEntry( "Lookin fer", { voice.KITTEH: True } ),
PTEntry( "In the market for", { voice.GNOMIC: True } ),
),
"mace": (
PTEntry( "clubba", { voice.ORCISH: True } ),
),
"makes": (
PTEntry( "makez", { voice.KITTEH: True } ),
),
"made": (
PTEntry( "maked", { voice.KITTEH: True, voice.SMART: False } ),
PTEntry( "maded", { voice.KITTEH: True, voice.SMART: True } ),
),
"me": (
PTEntry( "meh", { voice.KITTEH: True } ),
),
"more powerful": (
PTEntry( "stronger" ),
PTEntry( "ruffer", { voice.ORCISH: True } ),
PTEntry( "stronger", { voice.DRACONIAN: True } ),
PTEntry( "powuhfuller", { voice.KITTEH: True } ),
PTEntry( "bigger", { voice.STUPID: True } ),
PTEntry( "scarier", { voice.HURTHISH: True } ),
),
"much": (
PTEntry( "mutch", { voice.KITTEH: True } ),
PTEntry( "manys", { voice.KITTEH: True, voice.STUPID: True } ),
),
"My": (
PTEntry( "Me", { voice.ORCISH: True } ),
PTEntry( "Me", { voice.STUPID: True } ),
PTEntry( "Mai", { voice.KITTEH: True } ),
PTEntry( "Meh", { voice.KITTEH: True } ),
),
"my": (
PTEntry( "me", { voice.ORCISH: True } ),
PTEntry( "me", { voice.STUPID: True } ),
PTEntry( "mai", { voice.KITTEH: True } ),
PTEntry( "meh", { voice.KITTEH: True } ),
),
"need a": (
PTEntry( "desire a", { voice.DRACONIAN: True } ),
PTEntry( "needz a", { voice.KITTEH: True } ),
PTEntry( "require a", { voice.STUPID: False } ),
PTEntry( "want a", { voice.HURTHISH: True } ),
),
"need some": (
PTEntry( "desire some", { voice.DRACONIAN: True } ),
PTEntry( "needz sum", { voice.KITTEH: True } ),
PTEntry( "require some", { voice.STUPID: False } ),
PTEntry( "want some", { voice.HURTHISH: True } ),
),
"need to": (
PTEntry( "needz to", { voice.KITTEH: True } ),
),
"no !": (
PTEntry( "noes!", { voice.KITTEH: True } ),
),
"normal": (
PTEntry( "orthodox", { voice.GREEK: True } ),
PTEntry( "suitable", { voice.HURTHISH: True } ),
),
"nothing": (
PTEntry( "nuttin", { voice.KITTEH: True } ),
),
"over": (
PTEntry( "ovah", { voice.KITTEH: True } ),
),
"poison": (
PTEntry( "toxin", { voice.GREEK: True } ),
),
"poisonous": (
PTEntry( "toxic", { voice.GREEK: True } ),
),
"power": (
PTEntry( "strength" ),
PTEntry( "potential", { voice.DWARVEN: True } ),
PTEntry( "strength", { voice.DRACONIAN: True } ),
PTEntry( "powehs", { voice.KITTEH: True } ),
),
"powerful": (
PTEntry( "hurty", { voice.ORCISH: True } ),
PTEntry( "frightening", { voice.HURTHISH: True, voice.STUPID: False } ),
PTEntry( "scary", { voice.HURTHISH: True } ),
PTEntry( "dangerous" ),
),
"probably": (
PTEntry( "maybe" ),
PTEntry( "definitely", { voice.DWARVEN: True } ),
PTEntry( "potentially", { voice.GNOMIC: True } ),
PTEntry( "rilly", { voice.ORCISH: True } ),
PTEntry( "surely", { voice.DRACONIAN: True } ),
PTEntry( "downright", { voice.HURTHISH: True } ),
),
"riddle": (
PTEntry( "enigma", { voice.GREEK: True } ),
PTEntry( "quizzy question", { voice.HURTHISH: True } ),
),
"right now": (
PTEntry( "at present", { voice.SMART: True } ),
PTEntry( "today", { voice.GNOMIC: True } ),
PTEntry( "ere an now", { voice.ORCISH: True } ),
),
"sad": (
PTEntry( "down" ),
PTEntry( "miserable" ),
PTEntry( "dour", { voice.DWARVEN: True } ),
PTEntry( "sour", { voice.HURTHISH: True } ),
PTEntry( "unhappy", { voice.ELVEN: True, voice.STUPID: False } ),
PTEntry( "unwell", { voice.ELVEN: True } ),
PTEntry( "glum", { voice.ORCISH: True } ),
PTEntry( "melancholy", { voice.SMART: True } ),
),
"scared": (
PTEntry( "terrified", { voice.SMART: True } ),
PTEntry( "scaredy", { voice.KITTEH: True } ),
),
"sea": (
PTEntry( "whale-road", { voice.DWARVEN: True } ),
),
"see your wares": (
PTEntry( "see what you have" ),
PTEntry( "examine your wares", { voice.DWARVEN: True } ),
PTEntry( "see your treasures", { voice.DRACONIAN: True } ),
PTEntry( "look at yer gubbins", { voice.ORCISH: True } ),
PTEntry( "peruse your wares", { voice.ELVEN: True } ),
PTEntry( "inspect your merchandise", { voice.SMART: True } ),
PTEntry( "look at your stuff", { voice.STUPID: True } ),
PTEntry( "lookit teh shinies", { voice.KITTEH: True } ),
),
"She": (
PTEntry( "Her", { voice.KITTEH: True, voice.STUPID: True } ),
),
"she": (
PTEntry( "her", { voice.KITTEH: True, voice.STUPID: True } ),
),
"shop": (
PTEntry( "store" ),
PTEntry( "forge-front", { voice.DWARVEN: True } ),
PTEntry( "boutique", { voice.ELVEN: True } ),
),
"some": (
PTEntry( "ssome", { voice.DRACONIAN: True } ),
),
"something": (
PTEntry( "wotever", { voice.ORCISH: True } ),
PTEntry( "somethin'", { voice.KITTEH: True } ),
),
"staff": (
PTEntry( "stick", { voice.STUPID: True } ),
),
"sun": (
PTEntry( "sky-candle", { voice.DWARVEN: True } ),
),
"sword": (
PTEntry( "stabba", { voice.ORCISH: True } ),
PTEntry( "foe-skewer", { voice.DWARVEN: True } ),
),
"than others .": (
PTEntry( "than the rest." ),
PTEntry( "den woteva.", { voice.ORCISH: True } ),
PTEntry( "than all get-out.", { voice.HURTHISH: True } ),
),
"That": (
PTEntry( "Dat", { voice.ORCISH: True } ),
),
"that": (
PTEntry( "dat", { voice.ORCISH: True } ),
),
"that makes": (
PTEntry( "wot makes", { voice.ORCISH: True } ),
),
"The": (
PTEntry( "Dat", { voice.ORCISH: True } ),
PTEntry( "Teh", { voice.KITTEH: True } ),
),
"the": (
PTEntry( "dat", { voice.ORCISH: True } ),
PTEntry( "teh", { voice.KITTEH: True } ),
),
"There": (
PTEntry( "Thar", { voice.KITTEH: True } ),
PTEntry( "Dere", { voice.ORCISH: True } ),
),
"there": (
PTEntry( "thar", { voice.KITTEH: True } ),
PTEntry( "dere", { voice.ORCISH: True } ),
),
"There is": (
PTEntry( "There's" ),
PTEntry( "Dere's", { voice.ORCISH: True } ),
PTEntry( "Thar'z", { voice.KITTEH: True } ),
),
"There is not": (
PTEntry( "There isn't", { voice.STUPID: False } ),
PTEntry( "There ain't", { voice.STUPID: True } ),
PTEntry( "Dere ain't", { voice.ORCISH: True } ),
PTEntry( "Dere ain't no", { voice.ORCISH: True, voice.STUPID: True } ),
PTEntry( "Thar izn't", { voice.KITTEH: True, voice.STUPID: False } ),
PTEntry( "Thar ain't", { voice.KITTEH: True, voice.STUPID: True } ),
),
"These": (
PTEntry( "Deze", { voice.ORCISH: True } ),
),
"these": (
PTEntry( "deze", { voice.ORCISH: True } ),
),
"This": (
PTEntry( "Dis", { voice.ORCISH: True } ),
PTEntry( "Thiss", { voice.DRACONIAN: True } ),
PTEntry( "Dis", { voice.KITTEH: True } ),
),
"this": (
PTEntry( "dis", { voice.ORCISH: True } ),
PTEntry( "thiss", { voice.DRACONIAN: True } ),
PTEntry( "dis", { voice.KITTEH: True } ),
),
"This is": (
PTEntry( "Dis ere's", { voice.ORCISH: True } ),
PTEntry( "This here's", { voice.ORCISH: True, voice.SMART: True } ),
PTEntry( "Dis are", { voice.KITTEH: True } ),
),
"trying": (
PTEntry( "tryin", { voice.KITTEH: True, voice.STUPID: True } ),
),
"ugly": (
PTEntry( "unlovely", { voice.ELVEN: True } ),
),
"want": (
PTEntry( "desire", { voice.DRACONIAN: True } ),
),
"wares": (
PTEntry( "goods" ),
PTEntry( "products", { voice.SMART: True } ),
PTEntry( "shinies", { voice.KITTEH: True } ),
),
"We": (
PTEntry( "Us lot", { voice.ORCISH: True } ),
PTEntry( "We's", { voice.KITTEH: True } ),
PTEntry( "I", { voice.DRACONIAN: True } ),
),
"weak": (
PTEntry( "anemic", { voice.GREEK: True } ),
PTEntry( "unfit", { voice.DRACONIAN: True } ),
PTEntry( "unstrong", { voice.ELVEN: True } ),
),
"weapon": (
PTEntry( "foe-smiter", { voice.DWARVEN: True } ),
PTEntry( "head-cracker", { voice.ORCISH: True } ),
PTEntry( "skull-smacker", { voice.ORCISH: True } ),
PTEntry( "gutripper", { voice.ORCISH: True } ),
PTEntry( "armament", { voice.SMART: True } ),
PTEntry( "hurty thing", { voice.STUPID: True } ),
PTEntry( "weppun", { voice.KITTEH: True } ),
),
"Welcome": (
PTEntry( "Lali-ho", { voice.DWARVEN: True } ),
PTEntry( "O hai", { voice.KITTEH: True } ),
PTEntry( "Come on in", { voice.HURTHISH: True } ),
),
"Welcome to": (
PTEntry( "Git over ere to", { voice.ORCISH: True } ),
),
"went": (
PTEntry( "goed", { voice.KITTEH: True } ),
),
"what": (
PTEntry( "wot", { voice.ORCISH: True } ),
),
"wilderness": (
PTEntry( "wilds", { voice.GREEK: True } ),
PTEntry( "savage spaces", { voice.DRACONIAN: True } ),
PTEntry( "outdoors", { voice.DWARVEN: True } ),
),
"wisdom": (
PTEntry( "philosophy", { voice.GREEK: True } ),
PTEntry( "sense", { voice.DRACONIAN: True } ),
PTEntry( "sagacity", { voice.DRACONIAN: True, voice.SMART: True } ),
PTEntry( "prudence", { voice.DWARVEN: True } ),
),
"with": (
PTEntry( "wif", { voice.KITTEH: True } ),
),
"Would": (
PTEntry( "Wud", { voice.KITTEH: True } ),
),
"would": (
PTEntry( "wud", { voice.KITTEH: True } ),
),
"would like": (
PTEntry( "desire", { voice.DRACONIAN: True } ),
PTEntry( "wantz", { voice.KITTEH: True } ),
),
"your": (
PTEntry( "yer", { voice.ORCISH: True } ),
PTEntry( "youz", { voice.KITTEH: True } ),
),
"You look like you": (
PTEntry( "I tinks you", { voice.ORCISH: True } ),
PTEntry( "Mebbe you haz", { voice.KITTEH: True } ),
),
}
| jwvhewitt/dmeternal | old_game/dialogue/personalizer.py | Python | gpl-2.0 | 22,147 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class GaloisFieldElement(object):
"""
Element of a finite field
"""
def __init__(self):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class GaloisFieldArithmetic(object):
"""
A collection of arithmetic operators for finite field elements
"""
def __init__(self, add_identity, mul_identity):
self.add_identity = add_identity # additive identity
self.mul_identity = mul_identity # multiplicative identity
def add(self, a, b):
"""
a + b
"""
pass
def neg(self, a):
"""
-a
"""
pass
def sub(self, a, b):
"""
a - b
"""
pass
def mul(self, a, b):
"""
a * b
"""
pass
def invert(self, a):
"""
a^(-1)
"""
pass
def div(self, a, b):
"""
a / b
"""
pass
def pow(self, a, e):
"""
a^e
"""
pass
def get_add_identity(self):
return self.add_identity
def get_mul_identity(self):
return self.mul_identity
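# Illustrative sketch (not part of FATE): one way to instantiate the interface
# above for a prime field GF(p), with elements represented as plain ints modulo p.
class PrimeFieldArithmeticSketch(GaloisFieldArithmetic):
    def __init__(self, p):
        super(PrimeFieldArithmeticSketch, self).__init__(0, 1)
        self.p = p
    def add(self, a, b):
        return (a + b) % self.p
    def neg(self, a):
        return (-a) % self.p
    def sub(self, a, b):
        return (a - b) % self.p
    def mul(self, a, b):
        return (a * b) % self.p
    def invert(self, a):
        # Fermat's little theorem: a^(p-2) = a^(-1) mod p, for prime p and a != 0
        return pow(a, self.p - 2, self.p)
    def div(self, a, b):
        return self.mul(a, self.invert(b))
    def pow(self, a, e):
        return pow(a, e, self.p)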
| FederatedAI/FATE | python/federatedml/secureprotol/number_theory/field/base_galois_field.py | Python | apache-2.0 | 1,831 |
'''
Copyright 2013-2014 Reubenur Rahman
All Rights Reserved
@author: [email protected]
'''
import atexit
import argparse
import sys
import time
import ssl
from pyVmomi import vim, vmodl
from pyVim import connect
from pyVim.connect import Disconnect, SmartConnect, GetSi
inputs = {'vcenter_ip': '15.10.10.211',
'vcenter_password': 'Password123',
'vcenter_user': 'Administrator',
'vm_name' : 'reuben-test',
#create, remove or list
'operation' : 'create',
'snapshot_name' : 'my_test_snapshot',
'ignore_ssl': True
}
def get_obj(content, vimtype, name):
"""
Get the vsphere object associated with a given text name
"""
obj = None
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def wait_for_task(task, raiseOnError=True, si=None, pc=None):
if si is None:
si = GetSi()
if pc is None:
sc = si.RetrieveContent()
pc = sc.propertyCollector
# First create the object specification as the task object.
objspec = vmodl.Query.PropertyCollector.ObjectSpec()
objspec.SetObj(task)
# Next, create the property specification as the state.
propspec = vmodl.Query.PropertyCollector.PropertySpec()
propspec.SetType(vim.Task);
propspec.SetPathSet(["info.state"]);
propspec.SetAll(True)
# Create a filter spec with the specified object and property spec.
filterspec = vmodl.Query.PropertyCollector.FilterSpec()
filterspec.SetObjectSet([objspec])
filterspec.SetPropSet([propspec])
# Create the filter
filter = pc.CreateFilter(filterspec, True)
# Loop looking for updates till the state moves to a completed state.
taskName = task.GetInfo().GetName()
update = pc.WaitForUpdates(None)
state = task.GetInfo().GetState()
while state != vim.TaskInfo.State.success and \
state != vim.TaskInfo.State.error:
        if (state == 'running') and (taskName != "Destroy"):
            # check to see if the VM needs to ask a question; throw an exception if so
vm = task.GetInfo().GetEntity()
if vm is not None and isinstance(vm, vim.VirtualMachine):
qst = vm.GetRuntime().GetQuestion()
if qst is not None:
raise Exception("Task blocked, User Intervention required")
update = pc.WaitForUpdates(update.GetVersion())
state = task.GetInfo().GetState()
filter.Destroy()
if state == "error" and raiseOnError:
raise task.GetInfo().GetError()
return state
def invoke_and_track(func, *args, **kw):
try :
task = func(*args, **kw)
wait_for_task(task)
except:
raise
def get_snapshots(vm):
return get_snapshots_recursively(vm.snapshot.rootSnapshotList, '')
def get_snapshots_recursively(snapshots, snapshot_location):
snapshot_paths = []
if not snapshots:
return snapshot_paths
for snapshot in snapshots:
if snapshot_location:
current_snapshot_path = snapshot_location + '/' + snapshot.name
else:
current_snapshot_path = snapshot.name
snapshot_paths.append(current_snapshot_path)
snapshot_paths = snapshot_paths + get_snapshots_recursively(snapshot.childSnapshotList, current_snapshot_path)
return snapshot_paths
def main():
try:
si = None
try:
print "Trying to connect to VCENTER SERVER . . ."
context = None
if inputs['ignore_ssl']:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
#si = connect.Connect(args.host, int(args.port), args.user, args.password, service="hostd")
si = connect.Connect(inputs['vcenter_ip'], 443, inputs['vcenter_user'], inputs['vcenter_password'],
sslContext=context)
except IOError, e:
pass
atexit.register(Disconnect, si)
print "Connected to VCENTER SERVER !"
content = si.RetrieveContent()
operation = inputs['operation']
vm_name = inputs['vm_name']
snapshot_name = inputs['snapshot_name']
vm = get_obj(content, [vim.VirtualMachine], vm_name)
if operation == 'create':
description = "Test snapshot"
# Read about dumpMemory : http://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.VirtualMachine.html#createSnapshot
dumpMemory = False
quiesce = True
            invoke_and_track(vm.CreateSnapshot, snapshot_name, description, dumpMemory, quiesce)
elif operation == 'remove':
snapshots = vm.snapshot.rootSnapshotList
for snapshot in snapshots:
if snapshot_name == snapshot.name:
snap_obj = snapshot.snapshot
print "Removing snapshot ", snap_obj
                    invoke_and_track(snap_obj.RemoveSnapshot_Task, True)
else:
print "Couldn't find any snapshots"
if operation == 'list':
print 'Display list of snapshots on virtual machine ' + vm.name
snapshot_paths = get_snapshots(vm)
for snapshot_path in snapshot_paths:
print snapshot_path
except vmodl.MethodFault, e:
print "Caught vmodl fault: %s" % e.msg
return 1
except Exception, e:
if str(e).startswith("'vim.Task'"):
return 1
print "Caught exception: %s" % str(e)
return 1
# Start program
if __name__ == "__main__":
main()
| rreubenur/vmware-pyvmomi-examples | create_and_remove_snapshot.py | Python | apache-2.0 | 5,882 |
# -*- coding: utf8 -*-
################################################################################
#
# Copyright 2014-2016 Eric Lacombe <[email protected]>
#
################################################################################
#
# This file is part of fuddly.
#
# fuddly is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fuddly is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fuddly. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
from __future__ import print_function
import sys
import unittest
import ddt
sys.path.append('.')
from framework.value_types import *
import data_models.tutorial.example as example
from framework.fuzzing_primitives import *
from framework.plumbing import *
from framework.data_model import *
from framework.encoders import *
from test import ignore_data_model_specifics, run_long_tests, exit_on_import_error
def setUpModule():
global fmk, dm, results
fmk = FmkPlumbing(exit_on_error=exit_on_import_error, debug_mode=True)
fmk.start()
fmk.run_project(name='tuto', dm_name='example')
dm = example.data_model
results = collections.OrderedDict()
fmk.prj.reset_knowledge()
def tearDownModule():
global fmk
fmk.stop()
######## Test cases begin here ########
# Legacy --> Need to be revamped
class TestBasics(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm = example.data_model
cls.dm.load_data_model(fmk._name2dm)
def setUp(self):
pass
def test_01(self):
# print('\n### TEST 0: generate one EX1 ###')
#
node_ex1 = dm.get_atom('EX1')
print('Flatten 1: ', repr(node_ex1.to_bytes()))
print('Flatten 1: ', repr(node_ex1.to_bytes()))
l = node_ex1.get_value()
hk = list(node_ex1.iter_paths(only_paths=True))
# print(l)
#
# print('\n\n ####### \n\n')
#
# print(l[0])
# print(b' @ ' + b''.join(flatten(l[1])) + b' @ ')
# print(l[1])
#
# print('\n\n ####### \n\n')
#
#
# res1 = b' @ ' + b''.join(flatten(l[1])) + b' @ ' == l[0]
# print('*** Is the concatenation (first list element) correct? %r' % res1)
#
# res2 = len(b''.join(flatten(l[1]))) == int(l[2])
# print('*** Is length of the concatenation correct? %r' % res2)
#
# results['test0'] = res1 and res2
print('\n### TEST 1: cross check self.node.get_all_paths().keys() and get_nodes_names() ###')
print('*** Hkeys from self.node.iter_paths(only_paths=True):')
hk = sorted(hk)
for k in hk:
print(k)
print('*** Hkeys from get_nodes_names():')
l = sorted(node_ex1.get_nodes_names())
for k in l:
print(k)
res1 = len(hk) == len(l)
res2 = False
for i in range(len(hk)):
if hk[i] != l[i][0]:
res2 = False
break
else:
res2 = True
results['test1'] = res1 and res2
print('\n### TEST 2: generate two different EX1 ###')
node_ex1.unfreeze()
print(node_ex1.get_value())
val1 = node_ex1.to_bytes()
node_ex1.unfreeze()
print(node_ex1.get_value())
val2 = node_ex1.to_bytes()
results['test2'] = val1 != val2
print('\n### TEST 3: generate 4 identical TUX (with last one flatten) ###')
tux = dm.get_atom('TUX')
val1 = tux.get_value()
print(val1)
val2 = tux.get_value()
print(val2)
val3 = tux.get_value()
print(val3)
print(repr(tux.to_bytes()))
res = val1 == val2 and val1 == val3
results['test3'] = res
print('\n### TEST 4: generate 2 different flatten TUX ###')
tux.unfreeze()
val1 = repr(tux.to_bytes())
print(val1)
tux.unfreeze()
val2 = repr(tux.to_bytes())
print(val2)
res = val1 != val2
results['test4'] = res
print('\n### TEST 5: test get_node_by_path() ###')
print('\n*** test 5.1: get_node_by_path() with exact path')
tux2 = dm.get_atom('TUX')
print('* Shall return None:')
val1 = tux2.get_node_by_path(path='EX1')
val2 = tux2.get_node_by_path(path='CONCAT')
print(val1)
print(val2)
print('* Shall not return None:')
val3 = tux2.get_node_by_path('TUX/TX')
val4 = tux2.get_node_by_path('TUX/TC')
print(val3)
print(val4)
res1 = val1 == None and val2 == None and val3 != None and val4 != None
print('\n*** test 5.2: call 3 times get_node_by_path()')
print('name: %s, result: %s' % ('TUX', tux2.get_node_by_path('TUX').get_path_from(tux2)))
print('name: %s, result: %s' % ('TX', tux2.get_node_by_path('TX').get_path_from(tux2)))
print('name: %s, result: %s' % ('KU', tux2.get_node_by_path('KU', conf='ALT').get_path_from(tux2)))
print('name: %s, result: %s' % (
'MARK3', tux2.get_node_by_path('MARK3', conf='ALT').get_path_from(tux2, conf='ALT')))
print('\n*** test 5.3: call get_node_by_path() with real regexp')
print('--> ' + tux2.get_node_by_path('TX.*KU').get_path_from(tux2))
print('\n*** test 5.4: call get_reachable_nodes()')
node_ex1 = dm.get_atom('EX1')
l = node_ex1.get_reachable_nodes(path_regexp='TUX')
for i in l:
print(i.get_path_from(node_ex1))
print('\n')
node_ex1 = dm.get_atom('EX1')
l = node_ex1.get_reachable_nodes(path_regexp='T[XC]/KU')
for i in l:
print(i.get_path_from(node_ex1))
if len(l) == 4:
res2 = True
else:
res2 = False
print(res1, res2)
results['test5'] = res1 and res2
print('\n### TEST 6: get_reachable_nodes()')
for e in sorted(tux2.get_nodes_names()):
print(e)
c1 = NodeInternalsCriteria(mandatory_attrs=[NodeInternals.Mutable],
node_kinds=[NodeInternals_TypedValue])
c2 = NodeInternalsCriteria(node_kinds=[NodeInternals_TypedValue])
print('\n*** test 6.1:')
l1 = tux2.get_reachable_nodes(internals_criteria=c1)
l2 = tux2.get_reachable_nodes(internals_criteria=c2)
res61 = len(l2) > len(l1)
print('len(l1): %d, len(l2): %d' % (len(l1), len(l2)))
print('\n*** test 6.2:')
res62 = False
l = tux2.get_reachable_nodes(internals_criteria=c2, conf='ALT')
for k in l:
print(k.get_path_from(tux2, conf='ALT'))
if 'MARK3' in k.get_path_from(tux2, conf='ALT'):
res62 = True
break
# l = tux2.get_reachable_nodes(node_kinds=[NodeInternals_NonTerm], conf='ALT')
# for k in l:
# print(k.get_path_from(tux2, conf='ALT'))
print('\n*** test 6.3:')
c3 = NodeInternalsCriteria(node_kinds=[NodeInternals_Func])
l3 = node_ex1.get_reachable_nodes(internals_criteria=c3)
print("*** %d Func Node found" % len(l3))
print(l3)
res63 = len(l3) == 2
print(res61, res62, res63)
results['test6'] = res61 and res62 and res63
print('\n### TEST 7: get_reachable_nodes() and change_subnodes_csts()')
print('*** junk test:')
tux2.get_node_by_path('TUX$').cc.change_subnodes_csts([('u=+', 'u>'), ('u=.', 'u>')])
print(tux2.to_bytes())
print('\n*** test 7.1:')
print('> l1:')
tux2 = dm.get_atom('TUX')
# attr = Elt_Attributes(defaults=False)
# attr.conform_to_nonterm_node()
# node_kind = [NodeInternals_NonTerm]
crit = NodeInternalsCriteria(node_kinds=[NodeInternals_NonTerm])
l1 = tux2.get_reachable_nodes(internals_criteria=crit)
# tux2.cc.get_subnodes_csts_copy()
# exit()
res1 = True
for e in l1:
print(e.get_path_from(tux2))
e.cc.change_subnodes_csts([('*', 'u=.')])
csts1, _ = e.cc.get_subnodes_csts_copy()
print(csts1)
e.cc.change_subnodes_csts([('*', 'u=.'), ('u=.', 'u>')])
csts2, _ = e.cc.get_subnodes_csts_copy()
print(csts2)
print('\n')
# val = cmp(csts1, csts2)
val = (csts1 > csts2) - (csts1 < csts2)
if val != 0:
res1 = False
print('> l2:')
l2 = tux2.get_reachable_nodes(internals_criteria=crit)
for e in l2:
print(e.get_path_from(tux2))
print('\n*** test 7.2:')
res2 = len(l2) == len(l1)
print('len(l2) == len(l1)? %r' % res2)
print('\n*** test 7.3:')
tux = dm.get_atom('TUX')
l1 = tux.get_reachable_nodes(internals_criteria=crit)
c_l1 = []
for e in l1:
order, attrs = e.cc.get_subnodes_csts_copy()
e.cc.change_subnodes_csts([('u=.', 'u>'), ('u>', 'u=.')])
csts1, _ = e.cc.get_subnodes_csts_copy()
print(csts1)
print('\n')
c_l1.append(csts1)
e.set_subnodes_full_format(order, attrs)
l2 = tux.get_reachable_nodes(internals_criteria=crit)
c_l2 = []
for e in l2:
orig = e.cc.get_subnodes_csts_copy()
e.cc.change_subnodes_csts([('u>', 'u=.'), ('u=.', 'u>')])
csts2, _ = e.cc.get_subnodes_csts_copy()
print(csts2)
print('\n')
c_l2.append(csts2)
zip_l = zip(c_l1, c_l2)
test = 1
for zl1, zl2 in zip_l:
for ze1, ze2 in zip(zl1, zl2):
test = (ze1 > ze2) - (ze1 < ze2)
if test != 0:
print(ze1)
print('########')
print(ze2)
print('########')
break
if test != 0:
res3 = False
else:
res3 = True
# val = cmp(c_l1, c_l2)
val = (c_l1 > c_l2) - (c_l1 < c_l2)
if val != 0:
res3 = False
else:
res3 = True
print(res1, res2, res3)
results['test7'] = res1 and res2 and res3
print('\n### TEST 8: set_current_conf()')
node_ex1 = dm.get_atom('EX1')
print('\n*** test 8.0:')
res01 = True
l = sorted(node_ex1.get_nodes_names())
for k in l:
print(k)
if 'EX1' != k[0][:len('EX1')]:
res01 = False
break
l2 = sorted(node_ex1.get_nodes_names(conf='ALT'))
for k in l2:
print(k)
if 'EX1' != k[0][:len('EX1')]:
res01 = False
break
res02 = False
for k in l2:
if 'MARK2' in k[0]:
for k in l2:
if 'MARK3' in k[0]:
res02 = True
break
break
res0 = res01 and res02
print('\n*** test 8.1:')
res1 = True
msg = node_ex1.to_bytes(conf='ALT')
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes(conf='ALT')
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes()
if b' ~(..)~ ' in msg or b' ~(X)~ ' in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes(conf='ALT')
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
print('\n*** test 8.2:')
print('\n***** test 8.2.0: subparts:')
node_ex1 = dm.get_atom('EX1')
res2 = True
print(node_ex1.to_bytes())
node_ex1.set_current_conf('ALT', root_regexp=None)
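# u'\u00c2' is 'Â' (LATIN CAPITAL LETTER A WITH CIRCUMFLEX); once encoded with
# the internal codec it marks content that only the 'ALT' configuration emits.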
nonascii_test_str = u'\u00c2'.encode(internal_repr_codec)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes()
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg or b'[<]' not in msg or nonascii_test_str not in msg:
res2 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes()
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg or b'[<]' not in msg or nonascii_test_str not in msg:
res2 = False
print(msg)
node_ex1.set_current_conf('MAIN', reverse=True, root_regexp=None)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes()
if b' ~(..)~ ' in msg or b' ~(X)~ ' in msg or b'[<]' in msg or nonascii_test_str in msg:
res2 = False
print(msg)
node_ex1 = dm.get_atom('EX1')
node_ex1.set_current_conf('ALT', root_regexp='(TC)|(TC_.*)/KV')
node_ex1.set_current_conf('ALT', root_regexp='TUX$')
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes()
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg or b'[<]' not in msg or nonascii_test_str not in msg:
res2 = False
print(msg)
print('\n***** test 8.2.1: subparts equality:')
val1 = node_ex1.get_node_by_path('TUX$').to_bytes()
val2 = node_ex1.get_node_by_path('CONCAT$').to_bytes()
print(b' @ ' + val1 + b' @ ')
print(val2)
res21 = b' @ ' + val1 + b' @ ' == val2
print(res2, res21)
print('\n*** test 8.3:')
node_ex1 = dm.get_atom('EX1')
res3 = True
l = sorted(node_ex1.get_nodes_names(conf='ALT'))
for k in l:
print(k)
if 'EX1' != k[0][:len('EX1')]:
res3 = False
break
print('\n*** test 8.4:')
print(node_ex1.to_bytes())
res4 = True
l = sorted(node_ex1.get_nodes_names())
for k in l:
print(k)
if 'EX1' != k[0][:len('EX1')]:
res4 = False
break
print('\n*** test 8.5:')
node_ex1 = dm.get_atom('EX1')
res5 = True
node_ex1.unfreeze_all()
msg = node_ex1.get_node_by_path('TUX$').to_bytes(conf='ALT', recursive=False)
if b' ~(..)~ ' not in msg or b' ~(X)~ ' in msg:
res5 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.get_node_by_path('TUX$').to_bytes(conf='ALT', recursive=True)
if b' ~(..)~ ' not in msg or b' ~(X)~ ' not in msg:
res5 = False
print(msg)
print('\n*** test 8.6:')
node_ex1 = dm.get_atom('EX1')
# attr3 = Elt_Attributes(defaults=False)
# attr3.conform_to_nonterm_node()
# attr3.enable_conf('ALT')
# node_kind3 = [NodeInternals_NonTerm]
crit = NodeInternalsCriteria(mandatory_attrs=[NodeInternals.Mutable],
node_kinds=[NodeInternals_NonTerm])
node_ex1.unfreeze_all()
l = tux2.get_reachable_nodes(internals_criteria=crit, owned_conf='ALT')
for e in l:
print(e.get_path_from(tux2))
if len(l) == 4:
res6 = True
else:
res6 = False
print('Results:')
print(res0, res1, res2, res21, res3, res4, res5, res6)
results['test8'] = res0 and res1 and res2 and res21 and res3 and res4 and res5 and res6
print('\n### TEST 9: test the constraint type: =+(w1,w2,...)\n' \
'--> can be False in really rare cases')
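# 'u=+(w1,w2,...)' picks one subnode shape at random according to the given
# weights, so an unlucky sequence of draws can (rarely) miss the expected
# alternative, hence the caveat above.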
node_ex1 = dm.get_atom('EX1')
res = True
for i in range(20):
node_ex1.unfreeze_all()
msg = node_ex1.get_node_by_path('TUX$').to_bytes(conf='ALT', recursive=True)
if b' ~(..)~ TUX ~(..)~ ' not in msg:
res = False
break
# print(msg)
results['test9'] = res
print('\n### TEST 10: test fuzzing primitives')
print('\n*** test 10.1: fuzz_data_tree()')
node_ex1 = dm.get_atom('EX1')
fuzz_data_tree(node_ex1)
node_ex1.get_value()
print('\n### TEST 11: test terminal Node alternate conf')
print('\n*** test 11.1: value type Node')
node_ex1 = dm.get_atom('EX1')
res1 = True
msg = node_ex1.to_bytes(conf='ALT')
if b'[<]' not in msg or nonascii_test_str not in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes(conf='ALT')
if b'[<]' not in msg or nonascii_test_str not in msg:
res1 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.get_node_by_path('TUX$').to_bytes(conf='ALT', recursive=False)
if b'[<]' in msg or nonascii_test_str in msg or b' ~(..)~ TUX ~(..)~ ' not in msg:
res1 = False
print(msg)
print('\n*****\n')
crit = NodeInternalsCriteria(mandatory_attrs=[NodeInternals.Mutable],
node_kinds=[NodeInternals_TypedValue])
node_ex1.unfreeze_all()
l = node_ex1.get_reachable_nodes(internals_criteria=crit, owned_conf='ALT')
for e in l:
print(e.get_path_from(node_ex1))
if len(l) == 10:
res2 = True
else:
res2 = False
print('\n*** test 11.2: func type Node')
node_ex1 = dm.get_atom('EX1')
res3 = True
msg = node_ex1.to_bytes(conf='ALT')
if b'___' not in msg:
res3 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.to_bytes(conf='ALT')
if b'___' not in msg:
res3 = False
print(msg)
node_ex1.unfreeze_all()
msg = node_ex1.get_node_by_path('TUX$').to_bytes(conf='ALT', recursive=False)
if b'___' in msg:
res3 = False
print(msg)
print(res1, res2, res3)
results['test11'] = res1 and res2 and res3
print('\n### TEST 12: get_all_path() test')
print('\n*** test 12.1:')
node_ex1 = dm.get_atom('EX1')
for i in node_ex1.iter_paths(only_paths=True):
print(i)
print('\n******\n')
node_ex1.get_value()
for i in node_ex1.iter_paths(only_paths=True):
print(i)
print('\n******\n')
node_ex1.unfreeze_all()
node_ex1.get_value()
for i in node_ex1.iter_paths(only_paths=True):
print(i)
print('\n### SUMMARY ###')
for k, v in results.items():
print('is %s OK? %r' % (k, v))
for v in results.values():
self.assertTrue(v)
class TestMisc(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm = example.data_model
cls.dm.load_data_model(fmk._name2dm)
def setUp(self):
pass
def _loop_nodes(self, node, cpt=20, criteria_func=None, transform=lambda x: x):
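# Helper: unfreeze `node` up to `cpt` times, printing each regenerated value
# (optionally passed through `transform`); stop early as soon as a node
# matching `criteria_func` is reported exhausted by the environment.
# Returns the index of the last iteration performed.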
stop_loop = False
for i in range(cpt):
if stop_loop:
break
node.unfreeze()
print("[#%d] %r" % (i, transform(node.to_bytes())))
if node.env.exhausted_node_exists():
for e in node.env.get_exhausted_nodes():
if criteria_func(e):
print('--> exhausted node: ', e.name)
stop_loop = True
break
node.env.clear_all_exhausted_nodes()
return i
# @unittest.skip("demonstrating skipping")
def test_Node_unfreeze_dont_change_state(self):
'''
Check that unfreeze(dont_change_state=True) reproduces the value
generated by the preceding unfreeze().
'''
simple = self.dm.get_atom('Simple')
simple.make_determinist(recursive=True)
for i in range(15):
simple.unfreeze()
val1 = simple.to_bytes()
# print(val1)
simple.unfreeze(dont_change_state=True)
val2 = simple.to_bytes()
# print(val2)
if val1 != val2:
res1 = False
break
else:
res1 = True
self.assertTrue(res1)
def test_TypedNode_1(self):
evt = dm.get_atom('TVE')
evt.get_value()
print('=======[ PATHS ]========')
for i in evt.iter_paths(only_paths=True):
print(i)
print('\n=======[ Typed Nodes ]========')
c = NodeInternalsCriteria(node_kinds=[NodeInternals_TypedValue])
vt = {}
l = evt.get_reachable_nodes(internals_criteria=c)
for e in l:
print('------------')
print(' Node.name: ', e.name)
print(' Node.env: ', e.env)
print(' Node.value_type: ', e.cc.get_value_type())
vt[e] = e.cc.get_value_type()
if issubclass(vt[e].__class__, VT_Alt):
continue
print('')
evt = dm.get_atom('TVE')
evt.make_finite(all_conf=True, recursive=True)
evt.make_determinist(all_conf=True, recursive=True)
evt.show()
orig_rnode = evt.to_bytes()
prev_path = None
turn_nb_list = []
tn_consumer = TypedNodeDisruption()
for rnode, node, orig_node_val, i in ModelWalker(evt, tn_consumer, make_determinist=True, max_steps=300):
print('=======[ %d ]========' % i)
print(' orig: ', orig_rnode)
print(' ----')
if node is not None:
print(' fuzzed: ', rnode.to_bytes())
print(' ----')
current_path = node.get_path_from(rnode)
if current_path != prev_path:
turn_nb_list.append(i)
print(' current fuzzed node: %s' % current_path)
prev_path = current_path
vt = node.cc.get_value_type()
print(' node value type (changed by disruptor): ', vt)
if issubclass(vt.__class__, VT_Alt):
print(' |- node fuzzy mode: ', vt._fuzzy_mode)
print(' node value type determinist: ', vt.determinist)
print(' node determinist: ', node.cc.is_attr_set(NodeInternals.Determinist))
print(' node finite: ', node.cc.is_attr_set(NodeInternals.Finite))
if not issubclass(vt.__class__, VT_Alt):
print(' node vt endian: ', node.cc.get_value_type().endian)
print(' node orig value: (hexlified) {0!s:s}, {1!s:s}'.format(binascii.hexlify(orig_node_val),
orig_node_val))
print(' node corrupted value: (hexlified) {0!s:s}, {1!s:s}'.format(binascii.hexlify(node.to_bytes()),
node.to_bytes()))
else:
turn_nb_list.append(i)
print('\n--> Fuzzing terminated!\n')
break
print('\nTurn number when Node has changed: %r, number of test cases: %d' % (turn_nb_list, i))
good_list = [1, 13, 25, 37, 49, 55, 61, 73, 85, 97, 109, 121, 133, 145, 157, 163, 175, 187,
199, 208, 217, 233, 248]
msg = "If Fuzzy_<TypedValue>.values have been modified in size, the good_list should be updated.\n" \
"If BitField are in random mode [currently put in determinist mode], the fuzzy_mode can produce more" \
" or less value depending on drawn value when .get_value() is called (if the drawn value is" \
" the max for instance, drawn_value+1 will not be produced)"
self.assertTrue(turn_nb_list == good_list, msg=msg)
def test_Node_Attr_01(self):
'''
Value Node make_random()/make_determinist()
TODO: NEED assertion
'''
evt = dm.get_atom('TVE')
for i in range(10):
evt.unfreeze()
print(evt.to_bytes())
evt.get_node_by_path('Pre').make_random()
print('******')
for i in range(10):
evt.unfreeze()
print(evt.to_bytes())
# self.assertEqual(idx, )
def test_NonTerm_Attr_01(self):
'''
make_determinist()/finite() on NonTerm Node
TODO: NEED assertion
'''
loop_count = 50
crit_func = lambda x: x.name == 'NonTerm'
print('\n -=[ determinist & finite (loop count: %d) ]=- \n' % loop_count)
nt = dm.get_atom('NonTerm')
nt.make_finite(all_conf=True, recursive=True)
nt.make_determinist(all_conf=True, recursive=True)
nb = self._loop_nodes(nt, loop_count, criteria_func=crit_func)
self.assertEqual(nb, 18)
print('\n -=[ determinist & infinite (loop count: %d) ]=- \n' % loop_count)
nt = dm.get_atom('NonTerm')
nt.make_infinite(all_conf=True, recursive=True)
nt.make_determinist(all_conf=True, recursive=True)
self._loop_nodes(nt, loop_count, criteria_func=crit_func)
print('\n -=[ random & infinite (loop count: %d) ]=- \n' % loop_count)
nt = dm.get_atom('NonTerm')
# nt.make_infinite(all_conf=True, recursive=True)
nt.make_random(all_conf=True, recursive=True)
self._loop_nodes(nt, loop_count, criteria_func=crit_func)
print('\n -=[ random & finite (loop count: %d) ]=- \n' % loop_count)
nt = dm.get_atom('NonTerm')
nt.make_finite(all_conf=True, recursive=True)
nt.make_random(all_conf=True, recursive=True)
nb = self._loop_nodes(nt, loop_count, criteria_func=crit_func)
self.assertEqual(nb, 18)
def test_BitField_Attr_01(self):
'''
make_determinist()/finite() on BitField Node
TODO: NEED assertion
'''
loop_count = 80
print('\n -=[ random & infinite (loop count: %d) ]=- \n' % loop_count)
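# subfield_limits gives cumulative bit boundaries: [2, 6, 10, 12] describes
# four subfields of sizes [2, 4, 4, 2] (12 bits, padded up to 16, on the
# least-significant side since lsb_padding=True).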
t = BitField(subfield_limits=[2, 6, 10, 12],
subfield_values=[[4, 2, 1], [2, 15, 16, 3], None, [1]],
subfield_val_extremums=[None, None, [3, 11], None],
padding=0, lsb_padding=True, endian=VT.LittleEndian)
node = Node('BF', value_type=t)
node.set_env(Env())
node.make_random(all_conf=True, recursive=True)
self._loop_nodes(node, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n -=[ determinist & infinite (loop count: %d) ]=- \n' % loop_count)
node_copy = Node('BF_copy', base_node=node, ignore_frozen_state=True)
node_copy.set_env(Env())
node_copy.make_determinist(all_conf=True, recursive=True)
self._loop_nodes(node_copy, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n -=[ determinist & finite (loop count: %d) ]=- \n' % loop_count)
node_copy2 = Node('BF_copy2', base_node=node, ignore_frozen_state=True)
node_copy2.set_env(Env())
node_copy2.make_determinist(all_conf=True, recursive=True)
node_copy2.make_finite(all_conf=True, recursive=True)
self._loop_nodes(node_copy2, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n -=[ random & finite (loop count: %d) ]=- \n' % loop_count)
node_copy3 = Node('BF_copy3', base_node=node, ignore_frozen_state=True)
node_copy3.set_env(Env())
node_copy3.make_random(all_conf=True, recursive=True)
node_copy3.make_finite(all_conf=True, recursive=True)
self._loop_nodes(node_copy3, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
def test_BitField(self):
loop_count = 20
e_bf = Node('BF')
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[4, 2, 1], None, [10, 13]],
subfield_val_extremums=[None, [14, 15], None],
padding=0, lsb_padding=False, endian=VT.BigEndian)
e_bf.set_values(value_type=vt)
e_bf.set_env(Env())
e_bf.make_determinist(all_conf=True, recursive=True)
e_bf.make_finite(all_conf=True, recursive=True)
self._loop_nodes(e_bf, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n***\n')
e_bf.cc.value_type.switch_mode()
self._loop_nodes(e_bf, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n***\n')
e_bf.cc.value_type.switch_mode()
self._loop_nodes(e_bf, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n***')
print('We change the current BitField value:')
e_bf.unfreeze_all()
print(binascii.b2a_hex(e_bf.get_value()))
e_bf.unfreeze_all()
print(binascii.b2a_hex(e_bf.get_value()), '\n')
e_bf.cc.value_type.switch_mode()
self._loop_nodes(e_bf, loop_count, criteria_func=lambda x: True, transform=binascii.b2a_hex)
print('\n***')
print('Random & finite: (should result in only 1 possible value)')
vt = BitField(subfield_sizes=[4, 4], subfield_values=[[0x3], [0xF]])
e = Node('bf_test', value_type=vt)
e.set_env(Env())
e.make_finite()
e.make_random()
count = self._loop_nodes(e, loop_count, criteria_func=lambda x: True)
self.assertEqual(count, 1)
def test_BitField_basic_features(self):
print('\n***** [ BitField ] *****\n')
i = 0
ok = True
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[1], [1], [1], [1]],
padding=0, lsb_padding=False, endian=VT.LittleEndian)
val = binascii.b2a_hex(t.get_value())
print(t.pretty_print(), t.drawn_val)
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'4501')
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[1], [1], [1], [1]],
padding=0, lsb_padding=True, endian=VT.BigEndian)
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'5140')
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[1], [1], [1], [1]],
padding=1, lsb_padding=True, endian=VT.BigEndian)
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'517f')
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[1], [1], [1], [1]],
padding=0, lsb_padding=False, endian=VT.BigEndian)
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'0145')
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[1], [1], [1], [1]],
padding=1, lsb_padding=False, endian=VT.BigEndian)
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'fd45')
t = BitField(subfield_sizes=[2, 4, 2, 2], subfield_values=[[1], [1], [1], [1]],
padding=1, lsb_padding=False, endian=VT.BigEndian)
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
i += 1
self.assertEqual(val, b'fd45')
print('\n******** subfield_values\n')
# Note that 4 in subfield 1 and 16 in subfield 2 are ignored
# --> 6 different values are output before looping
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_values=[[4, 2, 1], [2, 15, 16, 3], [2, 3, 0], [1]],
padding=0, lsb_padding=True, endian=VT.LittleEndian, determinist=True)
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
print('\n********\n')
val = collections.OrderedDict()
t.switch_mode()
for i in range(30):
val[i] = binascii.b2a_hex(t.get_value())
print(t.pretty_print(), ' --> ', t.get_current_raw_val())
print('*** [%d] ' % i, val[i])
print(list(val.values())[:15])
self.assertEqual(list(val.values())[:15],
[b'c062', b'0062', b'4062', b'806f', b'8060', b'8063', b'8061',
b'8064', b'806e', b'8072', b'8042', b'8052', b'80e2', b'8022', b'80a2'])
print('\n********\n')
t.switch_mode()
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
print('\n******** subfield_val_extremums\n')
# --> 14 different values are output before looping
t = BitField(subfield_limits=[2, 6, 8, 10], subfield_val_extremums=[[1, 2], [4, 12], [0, 3], [2, 3]],
padding=0, lsb_padding=True, endian=VT.LittleEndian, determinist=True)
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
print('\n********\n')
t.switch_mode()
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
print('\n********\n')
t.switch_mode()
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
print('\n******** rewind() tests \n')
t = BitField(subfield_limits=[2, 6, 8, 10],
subfield_val_extremums=[[1, 2], [4, 12], [0, 3], None],
subfield_values=[None, None, None, [3]],
padding=0, lsb_padding=False, endian=VT.BigEndian, determinist=True)
val = {}
for i in range(30):
val[i] = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val[i])
if t.is_exhausted():
break
expected_vals = [b'0311', b'0312', b'0316', b'031a', b'031e', b'0322', b'0326',
b'032a', b'032e', b'0332', b'0372', b'03b2', b'03f2']
for idx, expected in enumerate(expected_vals):
if val[idx] != expected:
raise ValueError
print('\n********\n')
t.reset_state()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print('\n********\n')
t.reset_state()
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
if t.is_exhausted():
break
print('\n********\n')
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print('\n******** Fuzzy mode\n')
t.reset_state()
t.switch_mode()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print('\n********\n')
t.reset_state()
t.switch_mode()
for i in range(30):
val = binascii.b2a_hex(t.get_value())
print('*** [%d] ' % i, val)
if t.is_exhausted():
break
print('\n********\n')
print('--> rewind')
t.rewind()
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
print(binascii.b2a_hex(t.get_value()))
def test_BitField_various_features(self):
bf = Node('BF')
vt1 = BitField(subfield_sizes=[3, 5, 7],
subfield_values=[[2, 1], None, [10, 120]],
subfield_val_extremums=[None, [6, 15], None],
padding=0, lsb_padding=True, endian=VT.BigEndian)
bf.set_values(value_type=vt1)
bf.make_determinist(all_conf=True, recursive=True)
bf.set_env(Env())
print('\n -=[ .extend_right() method ]=- \n')
print('*** before extension')
bf.show()
# print(bf.get_raw_value())
# bf.unfreeze()
# bf.show()
vt2 = BitField(subfield_sizes=[4, 3, 4, 4, 2],
subfield_values=[None, [3, 5], [15], [14], [2]],
subfield_val_extremums=[[8, 12], None, None, None, None],
padding=0, lsb_padding=False, endian=VT.BigEndian)
print('*** after extension')
bf.reset_state()
bf.value_type.extend_right(vt2)
bf.show()
extended_val = 3151759922
extended_bytes = b'\xbb\xdc\n2'
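# 3151759922 == 0xBBDC0A32, i.e. b'\xbb\xdc\x0a\x32' in big-endian
# (b'\n2' == b'\x0a\x32').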
vt = bf.value_type
self.assertEqual(vt.subfield_limits, [3, 8, 15, 19, 22, 26, 30, 32])
self.assertEqual(vt.get_current_raw_val(), extended_val)
self.assertEqual(vt.get_current_value(), extended_bytes)
print('\n -=[ .extend_left() method ]=- \n')
# vt3 == vt2
vt3 = BitField(subfield_sizes=[4, 3, 4, 4, 2],
subfield_values=[None, [3, 5], [15], [14], [2]],
subfield_val_extremums=[[8, 12], None, None, None, None],
padding=0, lsb_padding=False, endian=VT.BigEndian)
bf2 = Node('BF', vt=vt3)
bf2.make_determinist(all_conf=True, recursive=True)
bf2.set_env(Env())
print('*** before extension')
bf2.show()
# vt4 == vt1
vt4 = BitField(subfield_sizes=[3, 5, 7],
subfield_values=[[2, 1], None, [10, 120]],
subfield_val_extremums=[None, [6, 15], None],
padding=0, lsb_padding=True, endian=VT.BigEndian)
print('*** after extension')
bf2.reset_state()
bf2.value_type.extend_left(vt4)
bf2.show()
self.assertEqual(bf2.value_type.subfield_limits, [3, 8, 15, 19, 22, 26, 30, 32])
self.assertEqual(bf2.value_type.get_current_raw_val(), extended_val)
self.assertEqual(bf2.value_type.get_current_value(), extended_bytes)
print('\n -=[ .set_subfield() .get_subfield() methods ]=- \n')
vt.set_subfield(idx=3, val=5)
vt.set_subfield(idx=0, val=3)
self.assertEqual(vt.get_subfield(idx=3), 5)
self.assertEqual(vt.get_subfield(idx=0), 3)
bf.unfreeze()
bf.show()
self.assertEqual(bf.value_type.get_subfield(idx=3), 5)
self.assertEqual(bf.value_type.get_subfield(idx=0), 3)
def test_BitField_absorb(self):
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[3, 2, 0xe, 1], None, [10, 13, 3]],
subfield_val_extremums=[None, [14, 15], None],
padding=1, endian=VT.BigEndian, lsb_padding=True)
bfield_1 = Node('bfield_1', value_type=vt)
# bfield.set_env(Env())
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[3, 2, 0xe, 1], None, [10, 13, 3]],
subfield_val_extremums=[None, [14, 15], None],
padding=0, endian=VT.BigEndian, lsb_padding=True)
bfield_2 = Node('bfield_2', value_type=vt)
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[3, 2, 0xe, 1], None, [10, 13, 3]],
subfield_val_extremums=[None, [14, 15], None],
padding=1, endian=VT.BigEndian, lsb_padding=False)
bfield_3 = Node('bfield_3', value_type=vt)
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[3, 2, 0xe, 1], None, [10, 13, 3]],
subfield_val_extremums=[None, [14, 15], None],
padding=0, endian=VT.BigEndian, lsb_padding=False)
bfield_4 = Node('bfield_4', value_type=vt)
# '?\xef' (\x3f\xe0) + padding 0b1111
msg = struct.pack('>H', 0x3fe0 + 0b1111)
status, off, size, name = bfield_1.absorb(msg, constraints=AbsFullCsts())
print('\n ---[message to absorb]---')
print(repr(msg))
bfield_1.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
msg = struct.pack('>H', 0x3fe0)
status, off, size, name = bfield_2.absorb(msg, constraints=AbsFullCsts())
print('\n ---[message to absorb]---')
print(repr(msg))
bfield_2.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
msg = struct.pack('>H', 0xf3fe)
status, off, size, name = bfield_3.absorb(msg, constraints=AbsFullCsts())
print('\n ---[message to absorb]---')
print(repr(msg))
bfield_3.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
msg = struct.pack('>H', 0x3fe)
status, off, size, name = bfield_4.absorb(msg, constraints=AbsFullCsts())
print('\n ---[message to absorb]---')
print(repr(msg))
bfield_4.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
def test_MISC(self):
'''
TODO: assertion + purpose
'''
loop_count = 20
e = Node('VT1')
vt = UINT16_be(values=[1, 2, 3, 4, 5, 6])
e.set_values(value_type=vt)
e.set_env(Env())
e.make_determinist(all_conf=True, recursive=True)
e.make_finite(all_conf=True, recursive=True)
self._loop_nodes(e, loop_count, criteria_func=lambda x: True)
e2 = Node('VT2', base_node=e, ignore_frozen_state=True)
e2.set_env(Env())
e2.make_determinist(all_conf=True, recursive=True)
e2.make_finite(all_conf=True, recursive=True)
self._loop_nodes(e2, loop_count, criteria_func=lambda x: True)
print('\n****\n')
sep = Node('sep', values=[' # '])
nt = Node('NT')
nt.set_subnodes_with_csts([
1, ['u>', [e, 3], [sep, 1], [e2, 2]]
])
nt.set_env(Env())
self._loop_nodes(nt, loop_count, criteria_func=lambda x: True)
print('\n****\n')
v = dm.get_atom('V1_middle')
v.make_finite()
e = Node('NT')
e.set_subnodes_with_csts([
1, ['u>', [v, 2]]
])
e.set_env(Env())
e.make_determinist(recursive=True)
self._loop_nodes(e, loop_count, criteria_func=lambda x: True)
print('\n****\n')
self._loop_nodes(e, loop_count, criteria_func=lambda x: True)
print('\n****\n')
e = dm.get_atom('Middle_NT')
e.make_finite(all_conf=True, recursive=True)
e.make_determinist(all_conf=True, recursive=True)
self._loop_nodes(e, loop_count, criteria_func=lambda x: x.name == 'Middle_NT')
class TestModelWalker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dm = example.data_model
cls.dm.load_data_model(fmk._name2dm)
def setUp(self):
pass
def test_NodeConsumerStub_1(self):
nt = self.dm.get_atom('Simple')
default_consumer = NodeConsumerStub()
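# ModelWalker yields (root_node, consumed_node, original_value, step_index)
# for each generated test case, until the consumer is exhausted or max_steps
# is reached.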
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, default_consumer, make_determinist=True,
max_steps=200):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 49)
def test_NodeConsumerStub_2(self):
nt = self.dm.get_atom('Simple')
default_consumer = NodeConsumerStub(max_runs_per_node=-1, min_runs_per_node=2)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, default_consumer, make_determinist=True,
max_steps=200):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 35)
def test_BasicVisitor(self):
nt = self.dm.get_atom('Simple')
default_consumer = BasicVisitor(respect_order=True)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, default_consumer, make_determinist=True,
max_steps=200):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 37)
print('***')
nt = self.dm.get_atom('Simple')
default_consumer = BasicVisitor(respect_order=False)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, default_consumer, make_determinist=True,
max_steps=200):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 37)
def test_NonTermVisitor(self):
print('***')
idx = 0
simple = self.dm.get_atom('Simple')
nonterm_consumer = NonTermVisitor(respect_order=True)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, nonterm_consumer, make_determinist=True,
max_steps=20):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 4)
print('***')
idx = 0
simple = self.dm.get_atom('Simple')
nonterm_consumer = NonTermVisitor(respect_order=False)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, nonterm_consumer, make_determinist=True,
max_steps=20):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 4)
print('***')
results = [
b' [!] ++++++++++ [!] ::>:: [!] ? [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::>:: [!] ? [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::>:: [!] ? [!] ',
b' [!] >>>>>>>>>> [!] ::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::>:: [!] ',
]
idx = 0
data = fmk.dm.get_external_atom(dm_name='mydf', data_id='shape')
nonterm_consumer = NonTermVisitor(respect_order=True)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(data, nonterm_consumer, make_determinist=True,
max_steps=50):
print(colorize('[%d] ' % idx + rnode.to_ascii(), rgb=Color.INFO))
self.assertEqual(rnode.to_bytes(), results[idx-1])
self.assertEqual(idx, 6)
print('***')
idx = 0
data = fmk.dm.get_external_atom(dm_name='mydf', data_id='shape')
nonterm_consumer = NonTermVisitor(respect_order=False)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(data, nonterm_consumer, make_determinist=True,
max_steps=50):
print(colorize('[%d] ' % idx + rnode.to_ascii(), rgb=Color.INFO))
self.assertEqual(idx, 6)
print('***')
def test_basics(self):
# data = fmk.dm.get_external_atom(dm_name='mydf', data_id='shape')
shape_desc = \
{'name': 'shape',
'custo_set': MH.Custo.NTerm.FrozenCopy,
'custo_clear': MH.Custo.NTerm.MutableClone,
'separator': {'contents': {'name': 'sep',
'contents': String(values=[' [!] '])}},
'contents': [
{'weight': 20,
'contents': [
{'name': 'prefix1',
'contents': String(size=10, alphabet='+')},
{'name': 'body_top',
'contents': [
{'name': 'body',
'custo_set': MH.Custo.NTerm.FrozenCopy,
'custo_clear': MH.Custo.NTerm.MutableClone,
'separator': {'contents': {'name': 'sep2',
'contents': String(values=['::'])}},
'shape_type': MH.Random, # ignored in determinist mode
'contents': [
{'contents': Filename(values=['AAA']),
'qty': (0, 4),
'name': 'str'},
{'contents': UINT8(values=[0x3E]), # chr(0x3E) == '>'
'name': 'int'}
]}
]}
]},
{'weight': 20,
'contents': [
{'name': 'prefix2',
'contents': String(size=10, alphabet='>')},
{'name': 'body'}
]}
]}
mb = NodeBuilder(delayed_jobs=True)
data = mb.create_graph_from_desc(shape_desc)
bv_data = data.get_clone()
nt_data = data.get_clone()
raw_vals = [
b' [!] ++++++++++ [!] ::?:: [!] ',
b' [!] ++++++++++ [!] ::=:: [!] ',
b' [!] ++++++++++ [!] ::\xff:: [!] ',
b' [!] ++++++++++ [!] ::\x00:: [!] ',
b' [!] ++++++++++ [!] ::\x01:: [!] ',
b' [!] ++++++++++ [!] ::\x80:: [!] ',
b' [!] ++++++++++ [!] ::\x7f:: [!] ',
b' [!] ++++++++++ [!] ::IAA::AAA::AAA::AAA::>:: [!] ', # [8] could change as it is a random corrupt_bit
b' [!] ++++++++++ [!] ::AAAA::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'XXX'*100 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::\x00\x00\x00::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::A%n::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::A%s::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'%n' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'%s' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\"%n\"' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\"%s\"' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\r\n' * 100 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::../../../../../../etc/password::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::..\\..\\..\\..\\..\\..\\Windows\\system.ini::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::file%n%n%n%nname.txt::AAA::AAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::?:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::=:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::\xff:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::\x00:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::\x01:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::\x80:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::AAA::AAA::\x7f:: [!] ',
b' [!] ++++++++++ [!] ::AAQ::AAA::>:: [!] ', # [30] could change as it is a random corrupt_bit
b' [!] ++++++++++ [!] ::AAAA::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'XXX'*100 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::\x00\x00\x00::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::A%n::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::A%s::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'%n' * 400 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'%s' * 400 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\"%n\"' * 400 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\"%s\"' * 400 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA' + b'\r\n' * 100 + b'::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::../../../../../../etc/password::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::..\\..\\..\\..\\..\\..\\Windows\\system.ini::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::file%n%n%n%nname.txt::AAA::>:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::?:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::=:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::\xff:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::\x00:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::\x01:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::\x80:: [!] ',
b' [!] ++++++++++ [!] ::AAA::AAA::\x7f:: [!] ',
b' [!] >>>>>>>>>> [!] ::?:: [!] ',
b' [!] >>>>>>>>>> [!] ::=:: [!] ',
b' [!] >>>>>>>>>> [!] ::\xff:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x00:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x01:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x80:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x7f:: [!] ',
b' [!] >>>>>>>>>> [!] ::QAA::AAA::AAA::AAA::>:: [!] ', # [59] could change as it is a random corrupt_bit
b' [!] >>>>>>>>>> [!] ::AAAA::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'XXX'*100 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x00\x00\x00::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::A%n::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::A%s::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'%n' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'%s' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\"%n\"' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\"%s\"' * 400 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\r\n' * 100 + b'::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::../../../../../../etc/password::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::..\\..\\..\\..\\..\\..\\Windows\\system.ini::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::file%n%n%n%nname.txt::AAA::AAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::?:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::=:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::\xff:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::\x00:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::\x01:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::\x80:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::AAA::AAA::\x7f:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAC::AAA::>:: [!] ', # [81] could change as it is a random corrupt_bit
b' [!] >>>>>>>>>> [!] ::AAAA::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'XXX'*100 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::\x00\x00\x00::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::A%n::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::A%s::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'%n' * 400 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'%s' * 400 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\"%n\"' * 400 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\"%s\"' * 400 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA' + b'\r\n' * 100 + b'::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::../../../../../../etc/password::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::..\\..\\..\\..\\..\\..\\Windows\\system.ini::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::file%n%n%n%nname.txt::AAA::>:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::?:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::=:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::\xff:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::\x00:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::\x01:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::\x80:: [!] ',
b' [!] >>>>>>>>>> [!] ::AAA::AAA::\x7f:: [!] ',
]
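# Restrict the disruptor to mutable typed-value nodes that are neither
# separators nor plain String subkinds, walking them in order.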
tn_consumer = TypedNodeDisruption(respect_order=True, ignore_separator=True)
ic = NodeInternalsCriteria(mandatory_attrs=[NodeInternals.Mutable],
negative_attrs=[NodeInternals.Separator],
node_kinds=[NodeInternals_TypedValue],
negative_node_subkinds=[String])
tn_consumer.set_node_interest(internals_criteria=ic)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(data, tn_consumer, make_determinist=True,
max_steps=200):
val = rnode.to_bytes()
print(colorize('[%d] ' % idx + repr(val), rgb=Color.INFO))
if idx not in [8, 30, 59, 81]:
self.assertEqual(val, raw_vals[idx - 1])
self.assertEqual(idx, 102) # should be even
print('***')
idx = 0
bv_consumer = BasicVisitor(respect_order=True)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(bv_data, bv_consumer,
make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + rnode.to_ascii(), rgb=Color.INFO))
self.assertEqual(idx, 6)
print('***')
idx = 0
nt_consumer = NonTermVisitor(respect_order=True)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt_data, nt_consumer,
make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + rnode.to_ascii(), rgb=Color.INFO))
self.assertEqual(idx, 6) # shall be equal to the previous test
def test_TypedNodeDisruption_1(self):
nt = self.dm.get_atom('Simple')
tn_consumer = TypedNodeDisruption()
ic = NodeInternalsCriteria(negative_node_subkinds=[String])
tn_consumer.set_node_interest(internals_criteria=ic)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, tn_consumer, make_determinist=True,
max_steps=300):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 21)
def test_TypedNodeDisruption_2(self):
nt = self.dm.get_atom('Simple')
tn_consumer = TypedNodeDisruption(max_runs_per_node=3, min_runs_per_node=3)
ic = NodeInternalsCriteria(negative_node_subkinds=[String])
tn_consumer.set_node_interest(internals_criteria=ic)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, tn_consumer, make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 9)
def test_TypedNodeDisruption_3(self):
'''
Test case similar to test_TermNodeDisruption_1() but with the more
powerful TypedNodeDisruption.
'''
nt = self.dm.get_atom('Simple')
tn_consumer = TypedNodeDisruption(max_runs_per_node=1)
# ic = NodeInternalsCriteria(negative_node_subkinds=[String])
# tn_consumer.set_node_interest(internals_criteria=ic)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(nt, tn_consumer, make_determinist=True,
max_steps=-1):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 444)
def test_TypedNodeDisruption_BitfieldCollapse(self):
'''
Corrupt the 'smscmd/TP-DCS' BitField with TypedNodeDisruption and check
that each corrupted value collapses correctly with the surrounding padding.
'''
data = fmk.dm.get_external_atom(dm_name='sms', data_id='smscmd')
data.freeze()
data.show()
print('\norig value: ' + repr(data['smscmd/TP-DCS'].to_bytes()))
# self.assertEqual(data['smscmd/TP-DCS'].to_bytes(), b'\xF6')
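# Expected TP-DCS byte after each disruption step (original value 0xF6);
# each step corrupts one part of the collapsed bitfield.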
corrupt_table = {
1: b'\x06',
2: b'\xE6',
3: b'\x16',
4: b'\xF7',
5: b'\xF4',
6: b'\xF5',
7: b'\xF2'
}
tn_consumer = TypedNodeDisruption(max_runs_per_node=1)
tn_consumer.set_node_interest(path_regexp='smscmd/TP-DCS')
# ic = NodeInternalsCriteria(negative_node_subkinds=[String])
# tn_consumer.set_node_interest(internals_criteria=ic)
for rnode, consumed_node, orig_node_val, idx in ModelWalker(data, tn_consumer,
make_determinist=True, max_steps=7):
print(colorize('\n[%d] ' % idx + repr(rnode['smscmd/TP-DCS$'].to_bytes()), rgb=Color.INFO))
print('node name: ' + consumed_node.name)
print('original value: {!s} ({!s})'.format(binascii.b2a_hex(orig_node_val),
bin(struct.unpack('B', orig_node_val)[0])))
print('corrupted value: {!s} ({!s})'.format(binascii.b2a_hex(consumed_node.to_bytes()),
bin(struct.unpack('B', consumed_node.to_bytes())[0])))
print('result: {!s} ({!s})'.format(binascii.b2a_hex(rnode['smscmd/TP-DCS$'].to_bytes()),
bin(struct.unpack('B', rnode['smscmd/TP-DCS$'].to_bytes())[0])))
rnode['smscmd/TP-DCS$'].show()
self.assertEqual(rnode['smscmd/TP-DCS'].to_bytes(), corrupt_table[idx])
def test_AltConfConsumer_1(self):
simple = self.dm.get_atom('Simple')
consumer = AltConfConsumer(max_runs_per_node=-1, min_runs_per_node=-1)
consumer.set_node_interest(owned_confs=['ALT'])
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, consumer, make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 15)
def test_AltConfConsumer_2(self):
simple = self.dm.get_atom('Simple')
consumer = AltConfConsumer(max_runs_per_node=2, min_runs_per_node=1)
consumer.set_node_interest(owned_confs=['ALT'])
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, consumer, make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 8)
def test_AltConfConsumer_3(self):
simple = self.dm.get_atom('Simple')
consumer = AltConfConsumer(max_runs_per_node=-1, min_runs_per_node=-1)
consumer.set_node_interest(owned_confs=['ALT', 'ALT_2'])
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, consumer, make_determinist=True,
max_steps=100):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 24)
def test_AltConfConsumer_4(self):
simple = self.dm.get_atom('Simple')
consumer = AltConfConsumer(max_runs_per_node=-1, min_runs_per_node=-1)
consumer.set_node_interest(owned_confs=['ALT_2', 'ALT'])
for rnode, consumed_node, orig_node_val, idx in ModelWalker(simple, consumer, make_determinist=True,
max_steps=50):
print(colorize('[%d] ' % idx + repr(rnode.to_bytes()), rgb=Color.INFO))
self.assertEqual(idx, 24)
def test_JPG(self):
dm = fmk.get_data_model_by_name('jpg')
dm.build_data_model()
nt = dm.get_atom('jpg')
tn_consumer = TypedNodeDisruption()
walker = iter(ModelWalker(nt, tn_consumer, make_determinist=True))
while True:
try:
rnode, consumed_node, orig_node_val, idx = next(walker)
# rnode.get_value()
except StopIteration:
break
print(colorize('number of imgs: %d' % idx, rgb=Color.INFO))
self.assertEqual(idx, 112)
def test_USB(self):
dm_usb = fmk.get_data_model_by_name('usb')
dm_usb.build_data_model()
data = dm_usb.get_atom('CONF')
consumer = TypedNodeDisruption()
consumer.need_reset_when_structure_change = True
for rnode, consumed_node, orig_node_val, idx in ModelWalker(data, consumer, make_determinist=True,
max_steps=600):
pass
# print(colorize('[%d] '%idx + repr(rnode.to_bytes()), rgb=Color.INFO))
print(colorize('number of confs: %d' % idx, rgb=Color.INFO))
self.assertIn(idx, [542])
@ddt.ddt
class TestNodeFeatures(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_djobs(self):
tag_desc = \
{'name': 'tag',
'contents': [
{'name': 'type',
'contents': UINT16_be(values=[0x0101,0x0102,0x0103,0x0104, 0]),
'absorb_csts': AbsFullCsts()},
{'name': 'len',
'contents': UINT16_be(),
'absorb_csts': AbsNoCsts()},
{'name': 'value',
'contents': [
{'name': 'v000', # Final Tag (optional)
'exists_if': (IntCondition(0), 'type'),
'sync_enc_size_with': 'len',
'contents': String(size=0)},
{'name': 'v101', # Service Name
'exists_if': (IntCondition(0x0101), 'type'),
'sync_enc_size_with': 'len',
'contents': String(values=[u'my \u00fcber service'], codec='utf8'),
},
{'name': 'v102', # AC name
'exists_if': (IntCondition(0x0102), 'type'),
'sync_enc_size_with': 'len',
'contents': String(values=['AC name'], codec='utf8'),
},
{'name': 'v103', # Host Identifier
'exists_if': (IntCondition(0x0103), 'type'),
'sync_enc_size_with': 'len',
'contents': String(values=['Host Identifier']),
},
{'name': 'v104', # Cookie
'exists_if': (IntCondition(0x0104), 'type'),
'sync_enc_size_with': 'len',
'contents': String(values=['Cookie'], min_sz=0, max_sz=1000),
},
]}
]}
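# delayed_jobs=True defers synchronization work such as 'sync_enc_size_with',
# so 'len' is recomputed at freeze time; the three clones below should
# therefore serialize identically even after unfreezing subparts.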
mb = NodeBuilder(delayed_jobs=True)
d = mb.create_graph_from_desc(tag_desc)
d.make_determinist(recursive=True)
d2 = d.get_clone()
d3 = d.get_clone()
d.freeze()
d['.*/value$'].unfreeze()
d_raw = d.to_bytes()
d.show()
d2.freeze()
d2['.*/value$'].unfreeze()
d2['.*/value$'].freeze()
d2_raw = d2.to_bytes()
d2.show()
d3.freeze()
d3['.*/value$'].unfreeze()
d3['.*/len$'].unfreeze()
d3_raw = d3.to_bytes()
d3.show()
self.assertEqual(d_raw, d2_raw)
self.assertEqual(d_raw, d3_raw)
def test_absorb_nonterm_1(self):
nint_1 = Node('nint1', value_type=UINT16_le(values=[0xabcd]))
nint_2 = Node('nint2', value_type=UINT8(values=[0xf]))
nint_3 = Node('nint3', value_type=UINT16_be(values=[0xeffe]))
nstr_1 = Node('str1', value_type=String(values=['TBD1'], max_sz=5))
nstr_2 = Node('str2', value_type=String(values=['TBD2'], max_sz=8))
vt = BitField(subfield_sizes=[4, 4, 4],
subfield_values=[[3, 2, 0xe, 1], None, [10, 13, 3]],
subfield_val_extremums=[None, [14, 15], None],
padding=1, endian=VT.BigEndian, lsb_padding=True)
bfield = Node('bfield', value_type=vt)
bfield.enforce_absorb_constraints(AbsCsts())
top = Node('top')
top.set_subnodes_with_csts([
1, ['u>', [nint_1, 1], [nint_2, 2], [nstr_1, 1], [nint_3, 2], [nstr_2, 1], [bfield, 1]]
])
top.set_env(Env())
# '?\xef' (\x3f\xe0) + padding 0b1111
msg_tail = struct.pack('>H', 0x3fe0 + 0b1111)
msg = b'\xe1\xe2\xff\xeeCOOL!\xc1\xc2\x88\x9912345678' + msg_tail
status, off, size, name = top.absorb(msg, constraints=AbsNoCsts(size=True))
print('\n ---[message to absorb]---')
print(repr(msg))
print('\n ---[absorbed message]---')
print(top.to_bytes())
top.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
def test_absorb_nonterm_2(self):
nint_1 = Node('nint1', value_type=UINT16_le(values=[0xcdab, 0xffee]))
nint_2 = Node('nint2', value_type=UINT8(values=[0xaf, 0xbf, 0xcf]))
nint_3 = Node('nint3', value_type=UINT16_be(values=[0xcfab, 0xeffe]))
nstr_1 = Node('str1', value_type=String(values=['STR1', 'str1'], max_sz=5))
nstr_2 = Node('str2', value_type=String(values=['STR22', 'str222'], max_sz=8))
top = Node('top')
top.set_subnodes_with_csts([
1, ['u=.', [nint_1, 1], [nint_2, 1, 2], [nstr_1, 1], [nint_3, 2], [nstr_2, 1]]
])
top.set_env(Env())
# 2*nint_3 + nstr_1 + nstr_2 + 2*nint_2 + nint_1
msg = b'\xef\xfe\xef\xfeSTR1str222\xcf\xab\xcd'
status, off, size, name = top.absorb(msg)
print('\n ---[message to absorb]---')
print(repr(msg))
print('\n ---[absorbed message]---')
print(top.get_value())
top.show(alpha_order=True)
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
def test_absorb_nonterm_3(self):
nint_1 = Node('nint1', value_type=UINT16_le(values=[0xcdab, 0xffee]))
nint_2 = Node('nint2', value_type=UINT8(values=[0xaf, 0xbf, 0xcf]))
nint_3 = Node('nint3', value_type=UINT16_be(values=[0xcfab, 0xeffe]))
nstr_1 = Node('str1', value_type=String(values=['STR1', 'str1'], max_sz=5))
nstr_2 = Node('str2', value_type=String(values=['STR22', 'str222'], max_sz=8))
top = Node('top')
top.set_subnodes_with_csts([
1, ['u=+(2,2,1,5,1)', [nint_1, 1], [nint_2, 1], [nstr_1, 1], [nint_3, 2], [nstr_2, 1, 3]]
])
top.set_env(Env())
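# 'u=+(2,2,1,5,1)' draws a single subnode according to the per-subnode
# weights; the blob below is two 'str222' occurrences, which can only be
# matched by [nstr_2, 1, 3] (one to three repetitions).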
msg = b'str222str222'
status, off, size, name = top.absorb(msg)
print('\n ---[message to absorb]---')
print(repr(msg))
print('\n ---[absorbed message]---')
print(top.get_value())
top.show(alpha_order=True)
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(size, len(msg))
def test_absorb_nonterm_fullyrandom(self):
test_desc = \
{'name': 'test',
'contents': [
{'section_type': MH.FullyRandom,
'contents': [
{'contents': String(values=['AAA', 'BBBB', 'CCCCC']),
'qty': (2, 3),
'name': 'str'},
{'contents': UINT8(values=[2, 4, 6, 8]),
'qty': (3, 6),
'name': 'int'}
]}
]}
for i in range(5):
mb = NodeBuilder()
node = mb.create_graph_from_desc(test_desc)
node_abs = Node('test_abs', base_node=node)
node.set_env(Env())
node_abs.set_env(Env())
node.show()
data = node.to_bytes()
status, off, size, name = node_abs.absorb(data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \\_ length of original data:', len(data))
print(' \\_ remaining:', data[size:])
node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
def test_intg_absorb_1(self):
self.helper1_called = False
self.helper2_called = False
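# Absorb-helper protocol, as used below: return (status, offset, size);
# AbsorbStatus.Accept lets absorption proceed from `offset`, Reject declines
# the blob, and a size of None lets the node compute the size itself.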
def nint_1_helper(blob, constraints, node_internals):
if blob[:1] in [b'\xe1', b'\xcd']:
return AbsorbStatus.Accept, 0, None
else:
return AbsorbStatus.Reject, 0, None
def nint_1_alt_helper(blob, constraints, node_internals):
if blob[:1] == b'\xff':
return AbsorbStatus.Accept, 0, None
else:
self.helper1_called = True
return AbsorbStatus.Reject, 0, None
nint_1 = Node('nint1', value_type=UINT16_le(values=[0xabcd, 0xe2e1]))
nint_1.set_absorb_helper(nint_1_helper)
nint_1_cpy = nint_1.get_clone('nint1_cpy')
nint_1_alt = Node('nint1_alt', value_type=UINT16_le(values=[0xabff, 0xe2ff]))
nint_1_alt.set_absorb_helper(nint_1_alt_helper)
nint_1_alt_cpy = nint_1_alt.get_clone('nint1_alt_cpy')
nint_2 = Node('nint2', value_type=UINT8(values=[0xf, 0xff, 0xee]))
nint_3 = Node('nint3', value_type=UINT16_be(values=[0xeffe, 0xc1c2, 0x8899]))
nint_3_cpy = nint_3.get_clone('nint3_cpy')
nstr_1 = Node('cool', value_type=String(values=['TBD1'], size=4, codec='ascii'))
nstr_1.enforce_absorb_constraints(AbsNoCsts(regexp=True))
nstr_2 = Node('str2', value_type=String(values=['TBD2TBD2', '12345678'], size=8, codec='ascii'))
nint_50 = Node('nint50', value_type=UINT8(values=[0xaf, 0xbf, 0xcf]))
nint_51 = Node('nint51', value_type=UINT16_be(values=[0xcfab, 0xeffe]))
nstr_50 = Node('str50', value_type=String(values=['HERE', 'IAM'], max_sz=7))
middle1 = Node('middle1')
middle1.set_subnodes_with_csts([
3, ['u>', [nint_1_alt, 2]],
2, ['u>', [nint_1, 1, 10], [nint_2, 2], [nstr_1, 1], [nint_3, 2], [nstr_2, 1]],
1, ['u>', [nint_1_alt_cpy, 1], [nint_3_cpy, 1], 'u=+', [nstr_2, 1], [nint_1_cpy, 2], 'u>', [nstr_1, 1],
'u=.', [nint_50, 1], [nint_51, 1], [nstr_50, 2, 3]]
])
yeah = Node('yeah', value_type=String(values=['TBD', 'YEAH!'], max_sz=10, codec='ascii'))
splitter = Node('splitter', value_type=String(values=['TBD'], max_sz=10))
splitter.set_attr(NodeInternals.Abs_Postpone)
splitter.enforce_absorb_constraints(AbsNoCsts())
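# Abs_Postpone defers the splitter's absorption: the bytes it swallows are
# only determined once the next node (nint10, via nint_10_helper) has located
# its own start in the blob.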
def nint_10_helper(blob, constraints, node_internals):
off = blob.find(b'\xd2')
if off > -1:
self.helper2_called = True
return AbsorbStatus.Accept, off, None
else:
return AbsorbStatus.Reject, 0, None
nint_10 = Node('nint10', value_type=UINT16_be(values=[0xcbbc, 0xd2d3]))
nint_10.set_absorb_helper(nint_10_helper)
nstr_10 = Node('str10', value_type=String(values=['TBD', 'THE_END'], max_sz=7))
delim = Node('delim', value_type=String(values=[','], size=1))
nint_20 = Node('nint20', value_type=INT_str(values=[1, 2, 3]))
nint_21 = Node('nint21', value_type=UINT8(values=[0xbb]))
bottom = Node('bottom', subnodes=[delim, nint_20, nint_21])
bottom2 = Node('bottom2', base_node=bottom)
middle2 = Node('middle2')
middle2.set_subnodes_with_csts([
1, ['u>', [splitter, 1], [nint_10, 1], [bottom, 0, 1], [nstr_10, 1], [bottom2, 0, 1]]
])
top = Node('top', subnodes=[middle1, yeah, middle2])
top2 = Node('top2', base_node=top)
top.set_env(Env())
top2.set_env(Env())
msg = b'\xe1\xe2\xe1\xe2\xff\xeeCOOL!\xc1\xc2\x88\x9912345678YEAH!\xef\xdf\xbf\xd2\xd3,2\xbbTHE_END'
# middle1: nint_1_alt + nint_3 + 2*nint_1 + nstr_1('ABCD') + nint_51 + 2*nstr_50 + nint_50
msg2 = b'\xff\xe2\x88\x99\xe1\xe2\xcd\xabABCD\xef\xfeIAMHERE\xbfYEAH!\xef\xdf\xbf\xd2\xd3,2\xbbTHE_END'
print('\n****** top ******\n')
status, off, size, name = top.absorb(msg)
print('\n---[message to absorb: msg]---')
print(repr(msg))
print('---[absorbed message]---')
# print(repr(top))
print(top.get_value())
def verif_val_and_print(*arg, **kwargs):
Node._print_contents(*arg, **kwargs)
if 'TBD' in arg:
raise ValueError('Dissection Error!')
top.show(print_contents_func=verif_val_and_print)
l = top.get_nodes_names()
print('\n****** top2 ******\n')
status2, off2, size2, name2 = top2.absorb(msg2)
print('\n---[message to absorb: msg2]---')
print(repr(msg2))
print('---[absorbed message]---')
top2.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(len(l), 19)
self.assertEqual(len(msg), size)
self.assertTrue(self.helper1_called)
self.assertTrue(self.helper2_called)
self.assertEqual(top.get_node_by_path("top/middle2/str10").to_bytes(), b'THE_END')
# Because constraints are relaxed on this node, its nominal
# size of 4 is extended to 5 when absorbing b'COOL!'
self.assertEqual(top.get_node_by_path("top/middle1/cool").to_bytes(), b'COOL!')
self.assertEqual(status2, AbsorbStatus.FullyAbsorbed)
del self.helper1_called
del self.helper2_called
print('\n*** test __getitem__() ***\n')
print(top["top/middle2"])
print('\n***\n')
print(repr(top["top/middle2"]))
def test_show(self):
a = fmk.dm.get_external_atom(dm_name='usb', data_id='DEV')
b = fmk.dm.get_external_atom(dm_name='png', data_id='PNG_00')
a.show(raw_limit=400)
b.show(raw_limit=400)
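# Node.__setitem__ grafts a node at the given path: the USB 'DEV' atom
# replaces the PNG chunk's 'height' node, as the next show() illustrates.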
b['PNG_00/chunks/chk/height'] = a
b.show(raw_limit=400)
b['PNG_00/chunks/chk/height/idProduct'] = a
b.show(raw_limit=400)
def test_exist_condition_01(self):
''' Test existence condition for generation and absorption
'''
d = fmk.dm.get_external_atom(dm_name='mydf', data_id='exist_cond')
for i in range(10):
d_abs = fmk.dm.get_external_atom(dm_name='mydf', data_id='exist_cond')
d.show()
raw_data = d.to_bytes()
print('-----------------------')
print('Original Data:')
print(repr(raw_data))
print('-----------------------')
status, off, size, name = d_abs.absorb(raw_data, constraints=AbsFullCsts())
raw_data_abs = d_abs.to_bytes()
print('-----------------------')
print('Absorbed Data:')
print(repr(raw_data_abs))
print('-----------------------')
print('-----------------------')
print('Absorb Status: status=%s, off=%d, sz=%d, name=%s' % (status, off, size, name))
print(' \\_ length of original data: %d' % len(raw_data))
print(' \\_ remaining: %r' % raw_data[size:])
print('-----------------------')
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
d.unfreeze()
def test_exist_condition_02(self):
cond_desc = \
{'name': 'exist_cond',
'shape_type': MH.Ordered,
'contents': [
{'name': 'opcode',
'determinist': True,
'contents': String(values=['A3', 'A2'])},
{'name': 'command_A3',
'exists_if': (RawCondition('A3'), 'opcode'),
'contents': [
{'name': 'A3_subopcode',
'contents': BitField(subfield_sizes=[15, 2, 4], endian=VT.BigEndian,
subfield_values=[None, [1, 2], [5, 6, 12]],
subfield_val_extremums=[[500, 600], None, None],
determinist=False)},
{'name': 'A3_int',
'determinist': True,
'contents': UINT16_be(values=[10, 20, 30])},
{'name': 'A3_deco1',
'exists_if/and': [(IntCondition(val=[10]), 'A3_int'),
(BitFieldCondition(sf=2, val=[5]), 'A3_subopcode')],
'contents': String(values=['$ and_OK $'])},
{'name': 'A3_deco2',
'exists_if/and': [(IntCondition(val=[10]), 'A3_int'),
(BitFieldCondition(sf=2, val=[6]), 'A3_subopcode')],
'contents': String(values=['! and_KO !'])}
]},
{'name': 'A31_payload1',
'contents': String(values=['$ or_OK $']),
'exists_if/or': [(IntCondition(val=[20]), 'A3_int'),
(BitFieldCondition(sf=2, val=[5]), 'A3_subopcode')],
},
{'name': 'A31_payload2',
'contents': String(values=['! or_KO !']),
'exists_if/or': [(IntCondition(val=[20]), 'A3_int'),
(BitFieldCondition(sf=2, val=[6]), 'A3_subopcode')],
},
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(cond_desc)
print('***')
raw = node.to_bytes()
node.show()
print(raw, len(raw))
result = b"A3T\x0f\xa0\x00\n$ and_OK $$ or_OK $"
self.assertEqual(result, raw)
@ddt.data(
# gt_val test cases
{'opcode_val': [5], 'val': None, 'gt_val': 4, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': None, 'gt_val': 5, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': [5, 6], 'gt_val': 4, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': [6, 7], 'gt_val': 4, 'result': b'\x05'},
{'opcode_val': [5], 'val': 5, 'gt_val': 6, 'result': b'\x05'},
# lt_val test cases
{'opcode_val': [5], 'val': None, 'lt_val': 6, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': None, 'lt_val': 5, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': [4, 5], 'lt_val': 6, 'result': b'\x05[cond_checked]'},
{'opcode_val': [5], 'val': [3, 4], 'lt_val': 6, 'result': b'\x05'},
{'opcode_val': [5], 'val': 5, 'lt_val': 4, 'result': b'\x05'},
)
def test_exist_intcondition(self, params):
cond_desc = \
{'name': 'exist_cond',
'contents': [
{'name': 'opcode',
'determinist': True,
'contents': UINT8(values=params['opcode_val'])},
{'name': 'type',
'exists_if': (IntCondition(val=params['val'], gt_val=params.get('gt_val'),
lt_val=params.get('lt_val')),
'opcode'),
'contents': String(values=['[cond_checked]'])},
]}
node = NodeBuilder().create_graph_from_desc(cond_desc)
raw = node.to_bytes()
print('{} (len: {})'.format(raw, len(raw)))
self.assertEqual(params['result'], raw)
@ddt.data(
{'opcode_val': ['Test'], 'val': None, 'cond_func': lambda x: x.startswith(b'Te'),
'result': b'Test[cond_checked]'},
{'opcode_val': ['Tst'], 'val': None, 'cond_func': lambda x: x.startswith(b'Te'),
'result': b'Tst'},
)
def test_exist_rawcondition(self, params):
cond_desc = \
{'name': 'exist_cond',
'contents': [
{'name': 'opcode',
'determinist': True,
'contents': String(values=params['opcode_val'])},
{'name': 'type',
'exists_if': (RawCondition(val=params['val'], cond_func=params.get('cond_func')),
'opcode'),
'contents': String(values=['[cond_checked]'])},
]}
node = NodeBuilder().create_graph_from_desc(cond_desc)
raw = node.to_bytes()
print('{} (len: {})'.format(raw, len(raw)))
self.assertEqual(params['result'], raw)
def test_generalized_exist_cond(self):
gen_exist_desc = \
{'name': 'gen_exist_cond',
'separator': {'contents': {'name': 'sep_nl',
'contents': String(values=['\n'], max_sz=100, absorb_regexp='[\r\n|\n]+'),
'absorb_csts': AbsNoCsts(regexp=True)},
'prefix': False, 'suffix': False, 'unique': True},
'contents': [
{'name': 'body',
'qty': 7,
'separator': {'contents': {'name': 'sep_space',
'contents': String(values=[' '], max_sz=100, absorb_regexp=b'\s+'),
'absorb_csts': AbsNoCsts(size=True, regexp=True)},
'prefix': False, 'suffix': False, 'unique': True},
'contents': [
{'name': 'val_blk',
'separator': {'contents': {'name': 'sep_quote',
'contents': String(values=['"'])},
'prefix': False, 'suffix': True, 'unique': True},
'contents': [
{'name': 'key',
'contents': String(values=['value='])},
{'name': 'val1',
'contents': String(values=['Toulouse', 'Paris', 'Lyon']),
'exists_if': (RawCondition('Location'), 'param')},
{'name': 'val2',
'contents': String(values=['2015/10/08']),
'exists_if': (RawCondition('Date'), 'param')},
{'name': 'val3',
'contents': String(values=['10:40:42']),
'exists_if': (RawCondition('Time'), 'param')},
{'name': 'val4',
'contents': String(values=['NOT_SUPPORTED']),
'exists_if': (RawCondition(['NOTSUP1', 'NOTSUP2', 'NOTSUP3']), 'param')}
]},
{'name': 'name_blk',
'separator': {'contents': {'name': ('sep_quote', 2),
'contents': String(values=['"'])},
'prefix': False, 'suffix': True, 'unique': True},
'contents': [
{'name': ('key', 2),
'contents': String(values=['name='])},
{'name': 'param',
'contents': CYCLE(['NOTSUP1', 'Date', 'Time', 'NOTSUP2', 'NOTSUP3', 'Location'],
depth=2)}
]}
]}
]}
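        # Note: within each 'body' instance, 'param' is generated after the
        # 'exists_if' conditions that reference it, so delayed_jobs=True below
        # presumably lets those existence checks be deferred until 'param'
        # has been resolved.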
mb = NodeBuilder(delayed_jobs=True)
node = mb.create_graph_from_desc(gen_exist_desc)
print('***')
raw = node.to_bytes()
print(raw, len(raw))
result = \
b'value="NOT_SUPPORTED" name="NOTSUP1"\n' \
b'value="2015/10/08" name="Date"\n' \
b'value="10:40:42" name="Time"\n' \
b'value="NOT_SUPPORTED" name="NOTSUP2"\n' \
b'value="NOT_SUPPORTED" name="NOTSUP3"\n' \
b'value="Toulouse" name="Location"\n' \
b'value="NOT_SUPPORTED" name="NOTSUP1"'
print('***')
print(result, len(result))
self.assertEqual(result, raw)
def test_pick_and_cond(self):
pick_cond_desc = \
{'name': 'pick_cond',
'shape_type': MH.Ordered,
'contents': [
{'name': 'opcode',
'determinist': True,
'contents': String(values=['A1', 'A2', 'A3'])},
{'name': 'part1',
'determinist': True,
'shape_type': MH.Pick,
'contents': [
{'name': 'option2',
'exists_if': (RawCondition('A2'), 'opcode'),
'contents': String(values=[' 1_KO_A2'])},
{'name': 'option3',
'exists_if': (RawCondition('A3'), 'opcode'),
'contents': String(values=[' 1_KO_A3'])},
{'name': 'option1',
'exists_if': (RawCondition('A1'), 'opcode'),
'contents': String(values=[' 1_OK_A1'])},
]},
{'name': 'part2',
'determinist': False,
'weights': (100, 100, 1),
'shape_type': MH.Pick,
'contents': [
{'name': 'optionB',
'exists_if': (RawCondition('A2'), 'opcode'),
'contents': String(values=[' 2_KO_A2'])},
{'name': 'optionC',
'exists_if': (RawCondition('A3'), 'opcode'),
'contents': String(values=[' 2_KO_A3'])},
{'name': 'optionA',
'exists_if': (RawCondition('A1'), 'opcode'),
'contents': String(values=[' 2_OK_A1'])},
]},
]}
mb = NodeBuilder(delayed_jobs=True)
node = mb.create_graph_from_desc(pick_cond_desc)
print('***')
raw = node.to_bytes()
print(raw, len(raw))
result = b'A1 1_OK_A1 2_OK_A1'
self.assertEqual(result, raw)
def test_collapse_padding(self):
padding_desc = \
{'name': 'padding',
'shape_type': MH.Ordered,
'custo_set': MH.Custo.NTerm.CollapsePadding,
'contents': [
{'name': 'sublevel',
'contents': [
{'name': 'part2_msb',
'exists_if': (BitFieldCondition(sf=0, val=[1]), 'part1_lsb'),
'contents': BitField(subfield_sizes=[2, 2], endian=VT.BigEndian,
subfield_values=[[3], [3]])
},
{'name': 'part2_middle',
'exists_if': (BitFieldCondition(sf=0, val=[1]), 'part1_lsb'),
'contents': BitField(subfield_sizes=[2, 2, 1], endian=VT.BigEndian,
subfield_values=[[1, 2], [3], [0]])
},
{'name': 'part2_KO',
'exists_if': (BitFieldCondition(sf=0, val=[2]), 'part1_lsb'),
'contents': BitField(subfield_sizes=[2, 2], endian=VT.BigEndian,
subfield_values=[[1], [1]])
}
]},
{'name': 'part1_lsb',
'determinist': True,
'contents': BitField(subfield_sizes=[3, 1], padding=0, endian=VT.BigEndian,
subfield_values=[None, [1]],
subfield_val_extremums=[[1, 3], None])
},
]}
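        # The CollapsePadding customization appears to pack the BitField nodes
        # bit-contiguously instead of byte-aligning each one: the 4 + 5 + 4
        # selected bits above collapse into the two bytes checked below rather
        # than three separately padded bytes.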
mb = NodeBuilder()
node = mb.create_graph_from_desc(padding_desc)
print('***')
raw = node.to_bytes()
node.show() # part2_KO should not be displayed
print(raw, binascii.b2a_hex(raw),
list(map(lambda x: bin(x), struct.unpack('>' + 'B' * len(raw), raw))),
len(raw))
result = b'\xf6\xc8'
self.assertEqual(result, raw)
abs_test_desc = \
{'name': 'test',
'contents': [
{'name': 'prefix',
'contents': String(values=['prefix'])},
{'name': 'TP-DCS', # Data Coding Scheme (refer to GSM 03.38)
'custo_set': MH.Custo.NTerm.CollapsePadding,
'contents': [
{'name': '8-bit',
'determinist': True,
'contents': BitField(subfield_sizes=[8], endian=VT.BigEndian,
subfield_values=[
[0xAA]],
) },
{'name': 'msb',
'determinist': True,
'contents': BitField(subfield_sizes=[4], endian=VT.BigEndian,
subfield_values=[
[0b1111,0b1101,0b1100,0b0000]],
) },
{'name': 'lsb1',
'determinist': True,
'exists_if': (BitFieldCondition(sf=0, val=[0b1111]), 'msb'),
'contents': BitField(subfield_sizes=[2,1,1,8], endian=VT.BigEndian,
subfield_values=[[0b10,0b11,0b00,0b01],
[1,0],
[0],[0xFE]]
) },
{'name': 'lsb2',
'determinist': True,
'exists_if': (BitFieldCondition(sf=0, val=[0b1101,0b1100]), 'msb'),
'contents': BitField(subfield_sizes=[2,1,1], endian=VT.BigEndian,
subfield_values=[[0b10,0b11,0b00,0b01],
[0],
[0,1]]
) },
{'name': 'lsb31',
'determinist': True,
'exists_if': (BitFieldCondition(sf=0, val=[0]), 'msb'),
'contents': BitField(subfield_sizes=[3], endian=VT.BigEndian,
subfield_values=[
[0,4]
]
) },
{'name': 'lsb32',
'determinist': True,
'exists_if': (BitFieldCondition(sf=0, val=[0]), 'msb'),
'contents': BitField(subfield_sizes=[8], endian=VT.BigEndian,
subfield_values=[
[0,0x5c]
]
) },
{'name': 'lsb33',
'determinist': True,
'exists_if': (BitFieldCondition(sf=0, val=[0]), 'msb'),
'contents': BitField(subfield_sizes=[1], endian=VT.BigEndian,
subfield_values=[
[0,1]
]
) },
]},
{'name': 'suffix',
'contents': String(values=['suffix'])}
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(abs_test_desc)
node_abs = node.get_clone()
raw = node.to_bytes()
node.show() # part2_KO should not be displayed
print(raw, binascii.b2a_hex(raw),
list(map(lambda x: bin(x), struct.unpack('>' + 'B' * len(raw), raw))),
len(raw))
result = b'prefix\xaa\xff\xe6suffix'
self.assertEqual(result, raw)
print('\n*** Absorption test ***')
result = b'prefix\xaa\xff\xe2suffix'
abs_result = node_abs.absorb(result)
print('\n--> Absorption status: {!r}\n'.format(abs_result))
self.assertEqual(abs_result[0], AbsorbStatus.FullyAbsorbed)
raw = node_abs.to_bytes()
node_abs.show() # part2_KO should not be displayed
print(raw, binascii.b2a_hex(raw),
list(map(lambda x: bin(x), struct.unpack('>' + 'B' * len(raw), raw))),
len(raw))
self.assertEqual(result, raw)
result = b'prefix\xaa\xdasuffix'
abs_result = node_abs.absorb(result)
print('\n--> Absorption status: {!r}\n'.format(abs_result))
self.assertEqual(abs_result[0], AbsorbStatus.FullyAbsorbed)
raw = node_abs.to_bytes()
node_abs.show() # part2_KO should not be displayed
print(raw, binascii.b2a_hex(raw),
list(map(lambda x: bin(x), struct.unpack('>' + 'B' * len(raw), raw))),
len(raw))
self.assertEqual(result, raw)
result = b'prefix\xaa\x08\xb9suffix'
abs_result = node_abs.absorb(result)
print('\n--> Absorption status: {!r}\n'.format(abs_result))
self.assertEqual(abs_result[0], AbsorbStatus.FullyAbsorbed)
raw = node_abs.to_bytes()
node_abs.show() # part2_KO should not be displayed
print(raw, binascii.b2a_hex(raw),
list(map(lambda x: bin(x), struct.unpack('>' + 'B' * len(raw), raw))),
len(raw))
self.assertEqual(result, raw)
def test_search_primitive(self):
data = fmk.dm.get_external_atom(dm_name='mydf', data_id='exist_cond')
data.freeze()
data.unfreeze()
data.freeze()
data.unfreeze()
data.freeze()
# At this step the data should exhibit 'command_A3'
ic = NodeInternalsCriteria(required_csts=[SyncScope.Existence])
l1 = data.get_reachable_nodes(internals_criteria=ic)
print("\n*** {:d} nodes with existence condition found".format(len(l1)))
res = []
for n in l1:
print(' |_ ' + n.name)
res.append(n.name)
self.assertEqual(len(res), 3)
self.assertTrue('command_A3' in res)
# node_to_corrupt = l1[1]
# print('\n*** Node that will be corrupted: {:s}'.format(node_to_corrupt.name))
# data.env.add_node_to_corrupt(node_to_corrupt)
# corrupted_data = Node(data.name, base_node=data, ignore_frozen_state=False, new_env=True)
# data.env.remove_node_to_corrupt(node_to_corrupt)
# corrupted_data.unfreeze(recursive=True, reevaluate_constraints=True)
# corrupted_data.show()
class TestNode_NonTerm(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_infinity(self):
infinity_desc = \
{'name': 'infinity',
'contents': [
{'name': 'prefix',
'contents': String(values=['A']),
'qty': (2, -1)},
{'name': 'mid',
'contents': String(values=['H']),
'qty': -1},
{'name': 'suffix',
'contents': String(values=['Z']),
'qty': (2, -1)},
]}
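        # A qty of -1 in this description appears to mean 'unbounded'; during
        # generation it is capped at NodeInternals_NonTerm.INFINITY_LIMIT,
        # which the oversized absorption test below deliberately exceeds.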
mb = NodeBuilder()
node = mb.create_graph_from_desc(infinity_desc)
node_abs = Node('infinity_abs', base_node=node)
node_abs2 = Node('infinity_abs', base_node=node)
node.set_env(Env())
node_abs.set_env(Env())
node_abs2.set_env(Env())
# node.show()
raw_data = node.to_bytes()
        print('\n*** Test with generated raw data (infinity is capped at NodeInternals_NonTerm.INFINITY_LIMIT)\n\nOriginal data:')
print(repr(raw_data), len(raw_data))
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
# node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
print('\n*** Test with big raw data\n\nOriginal data:')
raw_data2 = b'A' * (NodeInternals_NonTerm.INFINITY_LIMIT + 30) + b'H' * (
NodeInternals_NonTerm.INFINITY_LIMIT + 1) + \
b'Z' * (NodeInternals_NonTerm.INFINITY_LIMIT - 1)
print(repr(raw_data2), len(raw_data2))
status, off, size, name = node_abs2.absorb(raw_data2, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data2))
print(' \_ remaining:', raw_data2[size:])
raw_data_abs2 = node_abs2.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs2), len(raw_data_abs2))
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data2, raw_data_abs2)
def test_separator(self):
test_desc = \
{'name': 'test',
'determinist': True,
'separator': {'contents': {'name': 'SEP',
'contents': String(values=[' ', ' ', ' '],
absorb_regexp='\s+', determinist=False),
'absorb_csts': AbsNoCsts(regexp=True)},
'prefix': True,
'suffix': True,
'unique': True},
'contents': [
{'section_type': MH.FullyRandom,
'contents': [
{'contents': String(values=['AAA', 'BBBB', 'CCCCC']),
'qty': (3, 5),
'name': 'str'},
{'contents': String(values=['1', '22', '333']),
'qty': (3, 5),
'name': 'int'}
]},
{'section_type': MH.Random,
'contents': [
{'contents': String(values=['WW', 'YYY', 'ZZZZ']),
'qty': (2, 2),
'name': 'str2'},
{'contents': UINT16_be(values=[0xFFFF, 0xAAAA, 0xCCCC]),
'qty': (3, 3),
'name': 'int2'}
]},
{'section_type': MH.Pick,
'contents': [
{'contents': String(values=['LAST', 'END']),
'qty': (2, 2),
'name': 'str3'},
{'contents': UINT16_be(values=[0xDEAD, 0xBEEF]),
'qty': (2, 2),
'name': 'int3'}
]}
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(test_desc)
node.set_env(Env())
for i in range(5):
node_abs = Node('test_abs', base_node=node)
node_abs.set_env(Env())
node.show()
raw_data = node.to_bytes()
print('Original data:')
print(repr(raw_data), len(raw_data))
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
# node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(len(raw_data), len(raw_data_abs))
self.assertEqual(raw_data, raw_data_abs)
node.unfreeze()
def test_encoding_attr(self):
enc_desc = \
{'name': 'enc',
'contents': [
{'name': 'data0',
'contents': String(values=['Plip', 'Plop'])},
{'name': 'crc',
'contents': CRC(vt=UINT32_be, after_encoding=False),
'node_args': ['enc_data', 'data2'],
'absorb_csts': AbsFullCsts(contents=False)},
{'name': 'enc_data',
'encoder': GZIP_Enc(6),
'set_attrs': NodeInternals.Abs_Postpone,
'contents': [
{'name': 'len',
'contents': LEN(vt=UINT8, after_encoding=False),
'node_args': 'data1',
'absorb_csts': AbsFullCsts(contents=False)},
{'name': 'data1',
'contents': String(values=['Test!', 'Hello World!'], codec='utf-16-le')},
]},
{'name': 'data2',
'contents': String(values=['Red', 'Green', 'Blue'])},
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(enc_desc)
node.set_env(Env())
node_abs = Node('abs', base_node=node, new_env=True)
node_abs.set_env(Env())
node.show()
print('\nData:')
print(node.to_bytes())
self.assertEqual(struct.unpack('B', node['enc/enc_data/len$'].to_bytes())[0],
len(node['enc/enc_data/data1$'].get_raw_value()))
raw_data = b'Plop\x8c\xd6/\x06x\x9cc\raHe(f(aPd\x00\x00\x0bv\x01\xc7Blue'
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('\nAbsorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
class TestNode_TypedValue(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_str_alphabet(self):
alphabet1 = 'ABC'
alphabet2 = 'NED'
alpha_desc = \
{'name': 'top',
'contents': [
{'name': 'alpha1',
'contents': String(min_sz=10, max_sz=100, values=['A' * 10], alphabet=alphabet1),
'set_attrs': [NodeInternals.Abs_Postpone]},
{'name': 'alpha2',
'contents': String(min_sz=10, max_sz=100, alphabet=alphabet2)},
{'name': 'end',
'contents': String(values=['END'])},
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(alpha_desc)
node.set_env(Env())
node_abs = Node('alpha_abs', base_node=node)
node_abs.set_env(Env())
node.show()
raw_data = node.to_bytes()
print(repr(raw_data), len(raw_data))
alphabet = alphabet1 + alphabet2
for l in raw_data:
if sys.version_info[0] > 2:
l = chr(l)
self.assertTrue(l in alphabet)
print('\n*** Test with following data:')
raw_data = b'A' * 10 + b'DNE' * 30 + b'E' * 10 + b'END'
print(repr(raw_data), len(raw_data))
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
node_abs = Node('alpha_abs', base_node=node)
node_abs.set_env(Env())
print('\n*** Test with following INVALID data:')
raw_data = b'A' * 10 + b'DNE' * 20 + b'F' + b'END'
print(repr(raw_data), len(raw_data))
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
node_abs.show()
self.assertEqual(status, AbsorbStatus.Reject)
self.assertEqual(raw_data[size:], b'FEND')
def test_encoded_str_1(self):
class EncodedStr(String):
def encode(self, val):
return val + b'***'
def decode(self, val):
return val[:-3]
data = ['Test!', u'Hell\u00fc World!']
enc_desc = \
{'name': 'enc',
'contents': [
{'name': 'len',
'contents': LEN(vt=UINT8, after_encoding=False),
'node_args': 'user_data',
'absorb_csts': AbsFullCsts(contents=False)},
{'name': 'user_data',
'contents': EncodedStr(values=data, codec='utf8')},
{'name': 'compressed_data',
'contents': GZIP(values=data, encoding_arg=6)}
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(enc_desc)
node.set_env(Env())
node_abs = Node('enc_abs', base_node=node, new_env=True)
node_abs.set_env(Env())
node.show()
self.assertEqual(struct.unpack('B', node['enc/len$'].to_bytes())[0],
len(node['enc/user_data$'].get_raw_value()))
raw_data = b'\x0CHell\xC3\xBC World!***' + \
b'x\x9c\xf3H\xcd\xc9\xf9\xa3\x10\x9e_\x94\x93\xa2\x08\x00 \xb1\x04\xcb'
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsFullCsts())
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
node_abs.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
msg = b'Hello World'
gsm_t = GSM7bitPacking(max_sz=20)
gsm_enc = gsm_t.encode(msg)
gsm_dec = gsm_t.decode(gsm_enc)
self.assertEqual(msg, gsm_dec)
msg = b'Hello World!'
gsm_enc = gsm_t.encode(msg)
gsm_dec = gsm_t.decode(gsm_enc)
self.assertEqual(msg, gsm_dec)
msg = b'H'
gsm_enc = gsm_t.encode(msg)
gsm_dec = gsm_t.decode(gsm_enc)
self.assertEqual(msg, gsm_dec)
# msg = u'où ça'.encode(internal_repr_codec) #' b'o\xf9 \xe7a'
# vtype = UTF16_LE(max_sz=20)
# enc = vtype.encode(msg)
# dec = vtype.decode(enc)
# self.assertEqual(msg, dec)
#
# msg = u'où ça'.encode(internal_repr_codec)
# vtype = UTF16_BE(max_sz=20)
# enc = vtype.encode(msg)
# dec = vtype.decode(enc)
# self.assertEqual(msg, dec)
#
# msg = u'où ça'.encode(internal_repr_codec)
# vtype = UTF8(max_sz=20)
# enc = vtype.encode(msg)
# dec = vtype.decode(enc)
# self.assertEqual(msg, dec)
#
# msg = u'où ça'.encode(internal_repr_codec)
# vtype = Codec(max_sz=20, encoding_arg=None)
# enc = vtype.encode(msg)
# dec = vtype.decode(enc)
# self.assertEqual(msg, dec)
#
# msg = u'où ça'.encode(internal_repr_codec)
# vtype = Codec(max_sz=20, encoding_arg='utf_32')
# enc = vtype.encode(msg)
# dec = vtype.decode(enc)
# self.assertEqual(msg, dec)
# utf32_enc = b"\xff\xfe\x00\x00o\x00\x00\x00\xf9\x00\x00\x00 " \
# b"\x00\x00\x00\xe7\x00\x00\x00a\x00\x00\x00"
# self.assertEqual(enc, utf32_enc)
msg = b'Hello World!' * 10
vtype = GZIP(max_sz=20)
enc = vtype.encode(msg)
dec = vtype.decode(enc)
self.assertEqual(msg, dec)
msg = b'Hello World!'
vtype = Wrapper(max_sz=20, encoding_arg=[b'<test>', b'</test>'])
enc = vtype.encode(msg)
dec = vtype.decode(enc)
self.assertEqual(msg, dec)
vtype = Wrapper(max_sz=20, encoding_arg=[b'<test>', None])
enc = vtype.encode(msg)
dec = vtype.decode(enc)
self.assertEqual(msg, dec)
vtype = Wrapper(max_sz=20, encoding_arg=[None, b'</test>'])
enc = vtype.encode(msg)
dec = vtype.decode(enc)
self.assertEqual(msg, dec)
def test_encoded_str_2(self):
enc_desc = \
{'name': 'enc',
'contents': [
{'name': 'len',
'contents': UINT8()},
{'name': 'user_data',
'sync_enc_size_with': 'len',
'contents': String(values=['TEST'], codec='utf8')},
{'name': 'padding',
'contents': String(max_sz=0),
'absorb_csts': AbsNoCsts()},
]}
mb = NodeBuilder()
node = mb.create_graph_from_desc(enc_desc)
node.set_env(Env())
node_abs = Node('enc_abs', base_node=node, new_env=True)
node_abs.set_env(Env())
node_abs2 = node_abs.get_clone()
node_abs.show()
raw_data = b'\x0C' + b'\xC6\x67' + b'garbage' # \xC6\x67 --> invalid UTF8
status, off, size, name = node_abs.absorb(raw_data, constraints=AbsNoCsts(size=True, struct=True))
self.assertEqual(status, AbsorbStatus.Reject)
raw_data = b'\x05' + b'\xC3\xBCber' + b'padding' # \xC3\xBC = ü in UTF8
status, off, size, name = node_abs2.absorb(raw_data, constraints=AbsNoCsts(size=True, struct=True))
print('Absorb Status:', status, off, size, name)
print(' \_ length of original data:', len(raw_data))
print(' \_ remaining:', raw_data[size:])
raw_data_abs = node_abs2.to_bytes()
print(' \_ absorbed data:', repr(raw_data_abs), len(raw_data_abs))
node_abs2.show()
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
class TestHLAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_create_graph(self):
a = {'name': 'top',
'contents': [
{'weight': 2,
'contents': [
# block 1
{'section_type': MH.Ordered,
'duplicate_mode': MH.Copy,
'contents': [
{'contents': String(max_sz=10),
'name': 'val1',
'qty': (1, 5)},
{'name': 'val2'},
{'name': 'middle',
'custo_clear': MH.Custo.NTerm.MutableClone,
'custo_set': MH.Custo.NTerm.FrozenCopy,
'contents': [{
'section_type': MH.Ordered,
'contents': [
{'contents': String(values=['OK', 'KO'], size=2),
'name': 'val2'},
{'name': 'val21',
'clone': 'val1'},
{'name': 'USB_desc',
'import_from': 'usb',
'data_id': 'STR'},
{'type': MH.Leaf,
'contents': lambda x: x[0] + x[1],
'name': 'val22',
'node_args': ['val1', 'val3'],
'custo_set': MH.Custo.Func.FrozenArgs}
]}]},
{'contents': String(max_sz=10),
'name': 'val3',
'sync_qty_with': 'val1',
'alt': [
{'conf': 'alt1',
'contents': SINT8(values=[1, 4, 8])},
{'conf': 'alt2',
'contents': UINT16_be(min=0xeeee, max=0xff56),
'determinist': True}]}
]},
# block 2
{'section_type': MH.Pick,
'contents': [
{'contents': String(values=['PLIP', 'PLOP'], size=4),
'name': ('val21', 2)},
{'contents': SINT16_be(values=[-1, -3, -5, 7]),
'name': ('val22', 2)}
]}
]}
]}
mb = NodeBuilder(fmk.dm)
node = mb.create_graph_from_desc(a)
node.set_env(Env())
node.show()
node.unfreeze_all()
node.show()
node.unfreeze_all()
node.show()
node.reset_state(recursive=True)
node.set_current_conf('alt1', recursive=True)
node.show()
node.reset_state(recursive=True)
node.set_current_conf('alt2', recursive=True)
node.show()
        print('\nNode Dictionary (size: {:d}):\n'.format(len(mb.node_dico)))
for name, node in mb.node_dico.items():
print(name, ': ', repr(node), node.c)
class TestDataModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_data_container(self):
node = fmk.dm.get_external_atom(dm_name='mydf', data_id='exist_cond')
data = copy.copy(Data(node))
data = copy.copy(Data('TEST'))
@unittest.skipIf(not run_long_tests, "Long test case")
def test_data_makers(self):
for dm in fmk.dm_list:
try:
dm.load_data_model(fmk._name2dm)
except:
print("\n*** WARNING: Data Model '{:s}' not tested because" \
" the loading process has failed ***\n".format(dm.name))
raise
print("Test '%s' Data Model" % dm.name)
for data_id in dm.atom_identifiers():
print("Try to get '%s'" % data_id)
data = dm.get_atom(data_id)
data.get_value()
# data.show(raw_limit=200)
print('Success!')
@unittest.skipIf(not run_long_tests, "Long test case")
def test_data_model_specifics(self):
for dm in fmk.dm_list:
try:
dm.load_data_model(fmk._name2dm)
except:
print("\n*** WARNING: Data Model '{:s}' not tested because" \
" the loading process has failed ***\n".format(dm.name))
raise
print("Validating '{:s}' Data Model".format(dm.name))
ok = dm.validation_tests()
self.assertTrue(ok)
def test_generic_generators(self):
dm = fmk.get_data_model_by_name('mydf')
dm.load_data_model(fmk._name2dm)
for i in range(5):
d = dm.get_atom('off_gen')
d.show()
raw = d.to_bytes()
print(raw)
if sys.version_info[0] > 2:
retr_off = raw[-1]
else:
retr_off = struct.unpack('B', raw[-1])[0]
print('\nRetrieved offset is: %d' % retr_off)
int_idx = d['off_gen/body$'].get_subnode_idx(d['off_gen/body/int'])
off = int_idx * 3 + 10 # +10 for 'prefix' delta
self.assertEqual(off, retr_off)
@unittest.skipIf(ignore_data_model_specifics, "USB specific test cases")
def test_usb_specifics(self):
dm = fmk.get_data_model_by_name('usb')
dm.build_data_model()
msd_conf = dm.get_atom('CONF')
msd_conf.set_current_conf('MSD', recursive=True)
msd_conf.show()
self.assertEqual(len(msd_conf.to_bytes()), 32)
@unittest.skipIf(ignore_data_model_specifics, "PNG specific test cases")
def test_png_specifics(self):
dm = fmk.get_data_model_by_name('png')
dm.build_data_model()
png_dict = dm.import_file_contents(extension='png')
for n, png in png_dict.items():
png_buff = png.to_bytes()
png.show(raw_limit=400)
with open(gr.workspace_folder + 'TEST_FUZZING_' + n, 'wb') as f:
f.write(png_buff)
filename = os.path.join(dm.get_import_directory_path(), n)
with open(filename, 'rb') as orig:
orig_buff = orig.read()
if png_buff == orig_buff:
print("\n*** Builded Node ('%s') match the original image" % png.name)
else:
print("\n*** ERROR: Builded Node ('%s') does not match the original image!" % png.name)
self.assertEqual(png_buff, orig_buff)
@unittest.skipIf(ignore_data_model_specifics, "JPG specific test cases")
def test_jpg_specifics(self):
dm = fmk.get_data_model_by_name('jpg')
dm.build_data_model()
jpg_dict = dm.import_file_contents(extension='jpg')
for n, jpg in jpg_dict.items():
jpg_buff = jpg.to_bytes()
with open(gr.workspace_folder + 'TEST_FUZZING_' + n, 'wb') as f:
f.write(jpg_buff)
filename = os.path.join(dm.get_import_directory_path(), n)
with open(filename, 'rb') as orig:
orig_buff = orig.read()
if jpg_buff == orig_buff:
print("\n*** Builded Node ('%s') match the original image" % jpg.name)
else:
print("\n*** ERROR: Builded Node ('%s') does not match the original image!" % jpg.name)
print(' [original size={:d}, generated size={:d}]'.format(len(orig_buff), len(jpg_buff)))
self.assertEqual(jpg_buff, orig_buff)
@unittest.skipIf(ignore_data_model_specifics, "Tutorial specific test cases, cover various construction")
def test_tuto_specifics(self):
'''Tutorial specific test cases, cover various data model patterns and
absorption.'''
dm = fmk.get_data_model_by_name('mydf')
dm.load_data_model(fmk._name2dm)
data_id_list = ['misc_gen', 'len_gen', 'exist_cond', 'separator', 'AbsTest', 'AbsTest2',
'regex']
loop_cpt = 5
for data_id in data_id_list:
d = dm.get_atom(data_id)
for i in range(loop_cpt):
d_abs = dm.get_atom(data_id)
d_abs.set_current_conf('ABS', recursive=True)
d.show()
raw_data = d.to_bytes()
print('-----------------------')
print('Original Data:')
print(repr(raw_data))
print('-----------------------')
status, off, size, name = d_abs.absorb(raw_data, constraints=AbsFullCsts())
raw_data_abs = d_abs.to_bytes()
print('-----------------------')
print('Absorbed Data:')
print(repr(raw_data_abs))
print('-----------------------')
print('-----------------------')
print('Absorb Status: status=%s, off=%d, sz=%d, name=%s' % (status, off, size, name))
print(' \_ length of original data: %d' % len(raw_data))
print(' \_ remaining: %r' % raw_data[size:])
print('-----------------------')
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
self.assertEqual(raw_data, raw_data_abs)
d.unfreeze()
@unittest.skipIf(ignore_data_model_specifics, "ZIP specific test cases")
def test_zip_specifics(self):
dm = fmk.get_data_model_by_name('zip')
dm.build_data_model()
abszip = dm.get_atom('ZIP')
abszip.set_current_conf('ABS', recursive=True)
# We generate a ZIP file from the model only (no real ZIP file)
zip_buff = dm.get_atom('ZIP').to_bytes()
lg = len(zip_buff)
# dm.pkzip.show(raw_limit=400)
# dm.pkzip.reset_state(recursive=True)
status, off, size, name = abszip.absorb(zip_buff, constraints=AbsNoCsts(size=True, struct=True))
# abszip.show(raw_limit=400)
print('\n*** Absorb Status:', status, off, size, name)
print('*** Length of generated ZIP:', lg)
self.assertEqual(status, AbsorbStatus.FullyAbsorbed)
abs_buff = abszip.to_bytes()
if zip_buff == abs_buff:
print("\n*** Absorption of the generated node has worked!")
else:
print("\n*** ERROR: Absorption of the generated node has NOT worked!")
self.assertEqual(zip_buff, abs_buff)
# abszip.show()
flen_before = len(abszip['ZIP/file_list/file/data'].to_bytes())
print('file data len before: ', flen_before)
off_before = abszip['ZIP/cdir/cdir_hdr:2/file_hdr_off']
# Needed to avoid generated ZIP files that have less than 2 files.
if off_before is not None:
# Make modification of the ZIP and verify that some other ZIP
# fields are automatically updated
off_before = off_before.to_bytes()
print('offset before:', off_before)
csz_before = abszip['ZIP/file_list/file/header/common_attrs/compressed_size'].to_bytes()
print('compressed_size before:', csz_before)
abszip['ZIP/file_list/file/header/common_attrs/compressed_size'].set_current_conf('MAIN')
NEWVAL = b'TEST'
print(abszip['ZIP/file_list/file/data'].absorb(NEWVAL, constraints=AbsNoCsts()))
flen_after = len(abszip['ZIP/file_list/file/data'].to_bytes())
print('file data len after: ', flen_after)
abszip.unfreeze(only_generators=True)
abszip.get_value()
# print('\n******\n')
# abszip.show()
off_after = abszip['ZIP/cdir/cdir_hdr:2/file_hdr_off'].to_bytes()
print('offset after: ', off_after)
csz_after = abszip['ZIP/file_list/file/header/common_attrs/compressed_size'].to_bytes()
print('compressed_size after:', csz_after)
# Should not be equal in the general case
self.assertNotEqual(off_before, off_after)
# Should be equal in the general case
self.assertEqual(struct.unpack('<L', off_before)[0] - struct.unpack('<L', off_after)[0],
flen_before - flen_after)
self.assertEqual(struct.unpack('<L', csz_after)[0], len(NEWVAL))
zip_dict = dm.import_file_contents(extension='zip')
for n, pkzip in zip_dict.items():
zip_buff = pkzip.to_bytes()
# pkzip.show(raw_limit=400)
with open(gr.workspace_folder + 'TEST_FUZZING_' + n, 'wb') as f:
f.write(zip_buff)
filename = os.path.join(dm.get_import_directory_path(), n)
with open(filename, 'rb') as orig:
orig_buff = orig.read()
err_msg = "Some ZIP are not supported (those that doesn't store compressed_size" \
" in the file headers)"
if zip_buff == orig_buff:
print("\n*** Builded Node ('%s') match the original image" % pkzip.name)
else:
print("\n*** ERROR: Builded Node ('%s') does not match the original image!" % pkzip.name)
# print(err_msg)
self.assertEqual(zip_buff, orig_buff, msg=err_msg)
@ddt.ddt
class TestDataModelHelpers(unittest.TestCase):
@classmethod
def setUpClass(cls):
fmk.run_project(name='tuto', tg_ids=0, dm_name='mydf')
@ddt.data("HTTP_version_regex", ("HTTP_version_regex", 17), ("HTTP_version_regex", "whatever"))
def test_regex(self, regex_node_name):
HTTP_version_classic = \
{'name': 'HTTP_version_classic',
'contents': [
{'name': 'HTTP_name', 'contents': String(values=["HTTP"])},
{'name': 'slash', 'contents': String(values=["/"])},
{'name': 'major_version_digit', 'contents': String(size=1, values=["0", "1", "2", "3", "4",
"5", "6", "7", "8", "9"])},
{'name': '.', 'contents': String(values=["."])},
{'name': 'minor_version_digit', 'clone': 'major_version_digit'},
]}
HTTP_version_regex = \
{'name': regex_node_name, 'contents': "(HTTP)(/)(0|1|2|3|4|5|6|7|8|9)(\.)(0|1|2|3|4|5|6|7|8|9)"}
mb = NodeBuilder()
node_classic = mb.create_graph_from_desc(HTTP_version_classic)
node_classic.make_determinist(recursive=True)
mb = NodeBuilder()
node_regex = mb.create_graph_from_desc(HTTP_version_regex)
node_regex.make_determinist(recursive=True)
node_regex.show()
node_classic.show()
self.assertEqual(node_regex.to_bytes(), node_classic.to_bytes())
@ddt.data(('(HTTP)/[0-9]\.[0-9]|this|is|it[0123456789]', [5, 1, 2]),
('this|.is|it|[0123456789]', [1, 2, 1, 1]),
('|this|is|it[0123456789]|\dyes\-', [1, 2, 2]))
@ddt.unpack
def test_regex_shape(self, regexp, shapes):
revisited_HTTP_version = {'name': 'HTTP_version_classic', 'contents': regexp}
mb = NodeBuilder()
node = mb.create_graph_from_desc(revisited_HTTP_version)
excluded_idx = []
while True:
node_list, idx = node.cc._get_next_heavier_component(node.subnodes_order, excluded_idx=excluded_idx)
if len(node_list) == 0:
break
excluded_idx.append(idx)
print(node_list)
try:
idx = shapes.index(len(node_list[0][1]))
except ValueError:
print(len(node_list[0][1]))
self.fail()
else:
del shapes[idx]
self.assertEqual(len(shapes), 0)
def test_xml_helpers(self):
xml5_samples = [
'<?xml encoding="UTF-8" version="1.0" standalone="no"?>\n<command name="LOGIN">'
'\n<LOGIN backend="ssh" auth="cert">\n<msg_id>\n0\n</msg_id>\n<username>\nMyUser'
'\n</username>\n<password>\nplopi\n</password>\n</LOGIN>\n</command>',
'<?xml \t encoding="UTF-16" standalone="yes"\n version="7.9"?>\n <command name="LOGIN">'
'\n<LOGIN backend="ssh" auth="cert">\t \n<msg_id>\n56\n\t\n</msg_id>\n<username>\nMyUser'
'\n</username>\n<password>\nohohoh! \n</password>\n</LOGIN>\n</command>']
for idx, sample in enumerate(xml5_samples):
xml_atom = fmk.dm.get_atom('xml5')
status, off, size, name = xml_atom.absorb(sample, constraints=AbsFullCsts())
            print('Absorb Status: {!r}, off={:d}, size={:d}, name={:s}'.format(status, off, size, name))
print(' \_ length of original data: {:d}'.format(len(sample)))
print(' \_ remaining: {!r}'.format(sample[size:size+1000]))
xml_atom.show()
assert status == AbsorbStatus.FullyAbsorbed
data_sizes = [211, 149, 184]
for i in range(100):
data = fmk.get_data(['XML5', ('tWALK', UI(path='xml5/command/start-tag/content/attr1/cmd_val'))])
if data is None:
break
assert len(data.to_bytes()) == data_sizes[i]
go_on = fmk.send_data_and_log([data])
if not go_on:
raise ValueError
else:
raise ValueError
assert i == 3
specific_cases_checked = False
for i in range(100):
data = fmk.get_data(['XML5', ('tTYPE', UI(path='xml5/command/LOGIN/start-tag/content/attr1/val'))])
if data is None:
break
node_to_check = data.content['xml5/command/LOGIN/start-tag/content/attr1/val']
if node_to_check.to_bytes() == b'None':
# one case should trigger this condition
specific_cases_checked = True
go_on = fmk.send_data_and_log([data])
if not go_on:
raise ValueError
else:
raise ValueError
assert i == 22, 'number of test cases: {:d}'.format(i)
assert specific_cases_checked
class TestFMK(unittest.TestCase):
@classmethod
def setUpClass(cls):
fmk.run_project(name='tuto', tg_ids=0, dm_name='mydf')
fmk.prj.reset_target_mappings()
def setUp(self):
fmk.reload_all(tg_ids=[0])
fmk.prj.reset_target_mappings()
def test_generic_disruptors_01(self):
dmaker_type = 'TESTNODE'
# fmk.cleanup_dmaker(dmaker_type=dmaker_type, reset_existing_seed=True)
gen_disruptors = fmk._generic_tactics.disruptor_types
print('\n-=[ GENERIC DISRUPTORS ]=-\n')
print(gen_disruptors)
for dis in gen_disruptors:
if dis in ['tCROSS']:
continue
print("\n\n---[ Tested Disruptor %r ]---" % dis)
if dis == 'EXT':
act = [dmaker_type, (dis, UI(cmd='/bin/cat', file_mode=True))]
d = fmk.get_data(act)
else:
act = [dmaker_type, dis]
d = fmk.get_data(act)
if d is not None:
fmk._log_data(d)
print("\n---[ Pretty Print ]---\n")
d.show()
fmk.cleanup_dmaker(dmaker_type=dmaker_type, reset_existing_seed=True)
else:
raise ValueError("\n***WARNING: the sequence {!r} returns {!r}!".format(act, d))
fmk.cleanup_all_dmakers(reset_existing_seed=True)
def test_separator_disruptor(self):
for i in range(100):
d = fmk.get_data(['SEPARATOR', 'tSEP'])
if d is None:
break
fmk._setup_new_sending()
fmk._log_data(d)
self.assertGreater(i, 2)
def test_struct_disruptor(self):
idx = 0
expected_idx = 6
expected_outcomes = [b'A1', b'A2', b'A3$ A32_VALID $', b'A3T\x0f\xa0\x00\n$ A32_VALID $',
b'A3T\x0f\xa0\x00\n*1*0*', b'A1']
expected_outcomes_24_alt = [b'A3$ A32_INVALID $', b'A3T\x0f\xa0\x00\n$ A32_INVALID $']
outcomes = []
act = [('EXIST_COND', UI(determinist=True)), 'tWALK', 'tSTRUCT']
for i in range(4):
for j in range(10):
d = fmk.get_data(act)
if d is None:
print('--> Exiting (need new input)')
break
fmk._setup_new_sending()
fmk._log_data(d)
outcomes.append(d.to_bytes())
d.show()
idx += 1
self.assertEqual(outcomes[:2], expected_outcomes[:2])
self.assertTrue(outcomes[2:4] == expected_outcomes[2:4] or outcomes[2:4] == expected_outcomes_24_alt)
self.assertEqual(outcomes[-2:], expected_outcomes[-2:])
self.assertEqual(idx, expected_idx)
print('\n****\n')
expected_idx = 10
idx = 0
act = [('SEPARATOR', UI(determinist=True)), ('tSTRUCT', UI(deep=True))]
for j in range(10):
d = fmk.get_data(act)
if d is None:
print('--> Exiting (need new input)')
break
fmk._setup_new_sending()
fmk._log_data(d)
outcomes.append(d.to_bytes())
d.show()
idx += 1
self.assertEqual(idx, expected_idx)
def test_typednode_disruptor(self):
idx = 0
expected_idx = 13
expected_outcomes = []
outcomes = []
act = ['OFF_GEN', ('tTYPE', UI(runs_per_node=1))]
for j in range(100):
d = fmk.get_data(act)
if d is None:
print('--> Exiting (need new input)')
break
fmk._setup_new_sending()
fmk._log_data(d)
outcomes.append(d.to_bytes())
d.show()
idx += 1
self.assertEqual(idx, expected_idx)
def test_operator_1(self):
fmk.reload_all(tg_ids=[7,8])
fmk.launch_operator('MyOp', user_input=UI(max_steps=100, mode=1))
last_data_id = max(fmk.lg._last_data_IDs.values())
print('\n*** Last data ID: {:d}'.format(last_data_id))
fmkinfo = fmk.fmkDB.execute_sql_statement(
"SELECT CONTENT FROM FMKINFO "
"WHERE DATA_ID == {data_id:d} "
"ORDER BY ERROR DESC;".format(data_id=last_data_id)
)
self.assertTrue(fmkinfo)
for info in fmkinfo:
if 'Exhausted data maker' in info[0]:
break
else:
raise ValueError('the data maker should be exhausted and trigger the end of the operator')
@unittest.skipIf(not run_long_tests, "Long test case")
def test_operator_2(self):
fmk.reload_all(tg_ids=[7,8])
myop = fmk.get_operator(name='MyOp')
fmk.launch_operator('MyOp')
fbk = fmk.feedback_gate.get_feedback_from(myop)[0]['content']
print(fbk)
self.assertIn(b'You win!', fbk)
fmk.launch_operator('MyOp')
fbk = fmk.feedback_gate.get_feedback_from(myop)[0]['content']
print(fbk)
self.assertIn(b'You loose!', fbk)
def test_scenario_infra_01a(self):
print('\n*** test scenario SC_NO_REGEN via _send_data()')
base_qty = 0
for i in range(100):
data = fmk.get_data(['SC_NO_REGEN'])
data_list = fmk._send_data([data]) # needed to make the scenario progress
if not data_list:
base_qty = i
break
else:
raise ValueError
err_list = fmk.get_error()
code_vector = [str(e) for e in err_list]
print('\n*** Retrieved error code vector: {!r}'.format(code_vector))
self.assertEqual(code_vector, ['DataUnusable', 'HandOver', 'DataUnusable', 'HandOver',
'DPHandOver', 'NoMoreData'])
self.assertEqual(base_qty, 55)
print('\n*** test scenario SC_AUTO_REGEN via _send_data()')
for i in range(base_qty * 3):
data = fmk.get_data(['SC_AUTO_REGEN'])
data_list = fmk._send_data([data])
if not data_list:
raise ValueError
@unittest.skipIf(not run_long_tests, "Long test case")
def test_scenario_infra_01b(self):
print('\n*** test scenario SC_NO_REGEN via send_data_and_log()')
# send_data_and_log() is used to stimulate the framework in more places.
base_qty = 0
for i in range(100):
data = fmk.get_data(['SC_NO_REGEN'])
go_on = fmk.send_data_and_log([data])
if not go_on:
base_qty = i
break
else:
raise ValueError
err_list = fmk.get_error()
code_vector = [str(e) for e in err_list]
full_code_vector = [(str(e), e.msg) for e in err_list]
print('\n*** Retrieved error code vector: {!r}'.format(full_code_vector))
self.assertEqual(code_vector, ['DataUnusable', 'HandOver', 'DataUnusable', 'HandOver',
'DPHandOver', 'NoMoreData'])
self.assertEqual(base_qty, 55)
print('\n*** test scenario SC_AUTO_REGEN via send_data_and_log()')
for i in range(base_qty * 3):
data = fmk.get_data(['SC_AUTO_REGEN'])
go_on = fmk.send_data_and_log([data])
if not go_on:
raise ValueError
@unittest.skipIf(not run_long_tests, "Long test case")
def test_scenario_infra_02(self):
fmk.reload_all(tg_ids=[1]) # to collect feedback from monitoring probes
fmk.prj.reset_target_mappings()
fmk.prj.map_targets_to_scenario('ex1', {0: 1, 1: 1, None: 1})
fmk.prj.map_targets_to_scenario('ex2', {0: 1, 1: 1, None: 1})
print('\n*** Test scenario EX1')
data = None
prev_data = None
now = datetime.datetime.now()
for i in range(10):
prev_data = data
data = fmk.get_data(['SC_EX1'])
ok = fmk.send_data_and_log([data]) # needed to make the scenario progress
if not ok:
raise ValueError
exec_time = (datetime.datetime.now() - now).total_seconds()
self.assertEqual(prev_data.to_bytes(), data.to_bytes())
self.assertGreater(exec_time, 5)
print('\n\n*** Test SCENARIO EX2 ***\n\n')
data = None
steps = []
for i in range(4):
data = fmk.get_data(['SC_EX2'])
if i == 3:
self.assertTrue(data is None)
if data is not None:
steps.append(data.origin.current_step)
ok = fmk.send_data_and_log([data]) # needed to make the scenario progress
if not ok:
raise ValueError
if i == 0:
self.assertTrue(bool(fmk._task_list))
for idx, s in enumerate(steps):
print('\n[{:d}]-----'.format(idx))
print(s)
print('-----')
self.assertNotEqual(steps[-1], steps[-2])
self.assertFalse(bool(fmk._task_list))
def test_scenario_infra_03(self):
steps = []
for i in range(6):
data = fmk.get_data(['SC_EX3'])
steps.append(data.origin.current_step)
ok = fmk.send_data_and_log([data]) # needed to make the scenario progress
if not ok:
raise ValueError
for idx, s in enumerate(steps):
print('\n[{:d}]-----'.format(idx))
print(s)
print('-----')
self.assertEqual(steps[3], steps[5])
self.assertNotEqual(steps[5], steps[1])
self.assertEqual(steps[2], steps[4])
self.assertEqual(steps[0], steps[2])
def test_scenario_infra_04(self):
def walk_scenario(name, iter_num):
print('\n===== run scenario {:s} ======\n'.format(name))
steps = []
scenario = None
for i in range(iter_num):
data = fmk.get_data([name])
if i == 1:
scenario = data.origin
steps.append(data.origin.current_step)
ok = fmk.send_data_and_log([data]) # needed to make the scenario progress
if not ok:
raise ValueError
for idx, s in enumerate(steps):
print('\n[{:d}]-----'.format(idx))
print(s)
print('-----')
return scenario, steps
scenario, steps = walk_scenario('SC_TEST', 4)
print('\n++++ env.cbk_true_cpt={:d} | env.cbk_false_cpt={:d}'
.format(scenario.env.cbk_true_cpt, 0))
self.assertEqual(steps[0], steps[-1])
self.assertEqual(scenario.env.cbk_true_cpt, 2)
self.assertEqual(str(steps[-2]), '4TG1')
scenario, steps = walk_scenario('SC_TEST2', 2)
print('\n++++ env.cbk_true_cpt={:d} | env.cbk_false_cpt={:d}'
.format(scenario.env.cbk_true_cpt, 0))
# self.assertEqual(steps[0], steps[-1])
self.assertEqual(scenario.env.cbk_true_cpt, 1)
self.assertEqual(str(steps[-1]), '4TG1')
scenario, steps = walk_scenario('SC_TEST3', 2)
print('\n++++ env.cbk_true_cpt={:d} | env.cbk_false_cpt={:d}'
.format(scenario.env.cbk_true_cpt, 0))
# self.assertEqual(steps[0], steps[-1])
self.assertEqual(scenario.env.cbk_true_cpt, 2)
self.assertEqual(str(steps[-1]), '4TG1')
scenario, steps = walk_scenario('SC_TEST4', 2)
print('\n++++ env.cbk_true_cpt={:d} | env.cbk_false_cpt={:d}'
.format(scenario.env.cbk_true_cpt, scenario.env.cbk_false_cpt))
# self.assertEqual(steps[0], steps[-1])
self.assertEqual(scenario.env.cbk_true_cpt, 1)
self.assertEqual(scenario.env.cbk_false_cpt, 4)
self.assertEqual(str(steps[-1]), '4DEFAULT')
| k0retux/fuddly | test/integration/test_integration.py | Python | gpl-3.0 | 147,040 |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedDeliveryManagerUD(DistributedObjectUD):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedDeliveryManagerUD")
def hello(self, todo0):
pass
def rejectHello(self, todo0):
pass
def helloResponse(self, todo0):
pass
def getName(self, todo0):
pass
def receiveRejectGetName(self, todo0):
pass
def receiveAcceptGetName(self, todo0):
pass
def addName(self, todo0, todo1):
pass
def receiveRejectAddName(self, todo0):
pass
def receiveAcceptAddName(self, todo0):
pass
def addGift(self, todo0, todo1, todo2, todo3, todo4):
pass
def receiveRejectAddGift(self, todo0):
pass
def receiveAcceptAddGift(self, todo0, todo1, todo2, todo3):
pass
def deliverGifts(self, todo0, todo1):
pass
def receiveAcceptDeliverGifts(self, todo0, todo1):
pass
def receiveRejectDeliverGifts(self, todo0, todo1):
pass
def receiveRequestPayForGift(self, todo0, todo1, todo2):
pass
def receiveRequestPurchaseGift(self, todo0, todo1, todo2, todo3):
pass
def receiveAcceptPurchaseGift(self, todo0, todo1, todo2):
pass
def receiveRejectPurchaseGift(self, todo0, todo1, todo2, todo3):
pass
def heartbeat(self):
pass
def giveBeanBonus(self, todo0, todo1):
pass
def requestAck(self):
pass
def returnAck(self):
pass
def givePartyRefund(self, todo0, todo1, todo2, todo3, todo4):
pass
| silly-wacky-3-town-toon/SOURCE-COD | toontown/uberdog/DistributedDeliveryManagerUD.py | Python | apache-2.0 | 1,707 |
import warnings
import numpy as np
from copy import deepcopy as copy
from .environ import environ
from .tensor import VOIGT
from .deformation import update_deformation
def d_from_prescribed_stress(func, t, dt, temp, dtemp, f0, f,
stran, d, sig, ufield, dufield, statev, v, sigspec):
'''Determine the symmetric part of the velocity gradient given stress
Parameters
----------
func : callable
t, dt : float
temp, dtemp : float
f0, f : ndarray
stran, d, sig : ndarray
statev : ndarray
v : ndarray
sigspec : ndarray
Returns
-------
d : ndarray
The symmetric part of the velocity gradient
Notes
-----
The strain `stran` and rate of deformation `d` are assumed to be their
engineering representations (i.e., off-diagonals multiplied by 2)
Approach
--------
Seek to determine the unknown components of the symmetric part of
velocity gradient d[v] satisfying
P(d[v]) = Ppres[:] (1)
where P is the current stress, d the symmetric part of the velocity
gradient, v is a vector subscript array containing the components for
which stresses (or stress rates) are prescribed, and Ppres[:] are the
prescribed values at the current time.
Solution is found iteratively in (up to) 3 steps
1) Call newton to solve 1, return stress, statev, d if converged
2) Call newton with d[v] = 0. to solve 1, return stress, statev, d
if converged
    3) Call simplex with the original d to solve 1, and accept the answer it returns
'''
dsave = d.copy()
d = newton(func, t, dt, temp, dtemp, f0, f, stran, d,
sig, ufield, dufield, statev, v, sigspec)
if d is not None:
return d
# --- didn't converge, try Newton's method with initial
# --- d[v]=0.
d = dsave.copy()
d[v] = np.zeros(len(v))
d = newton(func, t, dt, temp, dtemp, f0, f, stran, d,
sig, ufield, dufield, statev, v, sigspec)
if d is not None:
return d
# --- Still didn't converge. Try downhill simplex method and accept
# whatever answer it returns:
d = dsave.copy()
return simplex(func, t, dt, temp, dtemp, f0, f, stran, d,
sig, ufield, dufield, statev, v, sigspec)
def newton(func, t, dt, temp, dtemp, f0, farg, stran, darg,
sigarg, ufield, dufield, statev_arg, v, sigspec):
'''Seek to determine the unknown components of the symmetric part of velocity
gradient d[v] satisfying
sig(d[v]) = sigspec
where sig is the current stress, d the symmetric part of the velocity
gradient, v is a vector subscript array containing the components for
which stresses (or stress rates) are prescribed, and sigspec are the
prescribed values at the current time.
Parameters
----------
func : instance
constiutive model instance
dt : float
time step
sig : ndarray
stress at beginning of step
statev_arg : ndarray
state dependent variables at beginning of step
v : ndarray
vector subscript array containing the components for which
stresses (or stress rates) are specified
sigspec : ndarray
Prescribed stress
Returns
-------
d : ndarray || None
If converged, the symmetric part of the velocity gradient, else None
Notes
-----
The approach is an iterative scheme employing a multidimensional Newton's
method. Each iteration begins with a call to subroutine jacobian, which
numerically computes the Jacobian submatrix
Js = J[v, v]
    where J[:,:] is the full Jacobian matrix J = dsig/deps. The value of
d[v] is then updated according to
d[v] = d[v] - Jsi*sigerr(d[v])/dt
where
sigerr(d[v]) = sig(d[v]) - sigspec
    The process is repeated until a convergence criterion is satisfied.
    Convergence is signaled through the return value: the converged d is
    returned, or None if the procedure did not converge.
'''
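    # Magnitude of the strain increment over the step; the shear components
    # a[3:] are weighted twice, consistent with the engineering (Voigt)
    # representation noted in the docstrings above.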
depsmag = lambda a: np.sqrt(sum(a[:3] ** 2) + 2. * sum(a[3:] ** 2)) * dt
# Initialize
    eps = np.finfo(np.float64).eps
tol1, tol2 = eps, np.sqrt(eps)
maxit1, maxit2, depsmax = 20, 30, .2
sig = sigarg.copy()
d = darg.copy()
f = farg.copy()
statev = copy(statev_arg)
sigsave = sig.copy()
statev_save = copy(statev)
# --- Check if strain increment is too large
if (depsmag(d) > depsmax):
return None
# update the material state to get the first guess at the new stress
sig, statev, stif = func(0, t, dt, temp, dtemp, f0, f, stran, d, sig,
ufield, dufield, statev)
sigerr = sig[v] - sigspec
# --- Perform Newton iteration
for i in range(maxit2):
sig = sigsave.copy()
statev = copy(statev_save)
stif = func(0, t, dt, temp, dtemp, f0, f, stran, d, sig,
ufield, dufield, statev)[2]
if stif is None:
# material models without an analytic jacobian send the Jacobian
# back as None so that it is found numerically here. Likewise, we
# find the numerical jacobian for visco materials - otherwise we
            # would have to convert the stiffness to that corresponding to
# the Truesdell rate, pull it back to the reference frame, apply
# the visco correction, push it forward, and convert to Jaummann
# rate. It's not as trivial as it sounds...
statev = copy(statev_save)
stif = numerical_jacobian(func, t, dt, temp, dtemp, f0, f,
stran, d, sig, ufield, dufield, statev, v)
else:
stif = stif[[[i] for i in v], v]
if environ.SQA:
try:
evals = np.linalg.eigvalsh(stif)
except np.linalg.LinAlgError:
raise RuntimeError('failed to determine elastic '
'stiffness eigenvalues')
else:
if np.any(evals < 0.):
negevals = evals[np.where(evals < 0.)]
warnings.warn('negative eigen value[s] encountered '
'in material Jacobian: '
'{0} ({1:.2f})'.format(negevals, t))
try:
d[v] -= np.linalg.solve(stif, sigerr) / dt
except np.linalg.LinAlgError:
if environ.SQA:
warnings.warn('using least squares approximation to '
'matrix inverse')
d[v] -= np.linalg.lstsq(stif, sigerr)[0] / dt
if (depsmag(d) > depsmax or np.any(np.isnan(d)) or np.any(np.isinf(d))):
# increment too large
return None
# with the updated rate of deformation, update stress and check
sig = sigsave.copy()
statev = copy(statev_save)
fp, ep = update_deformation(f, d, dt, 0)
sig, statev, stif = func(0, t, dt, temp, dtemp, f0, fp, ep, d, sig,
ufield, dufield, statev)
sigerr = sig[v] - sigspec
dnom = max(np.amax(np.abs(sigspec)), 1.)
relerr = np.amax(np.abs(sigerr) / dnom)
if i <= maxit1 and relerr < tol1:
return d
elif i > maxit1 and relerr < tol2:
return d
continue
    # didn't converge, restore data and exit
return None
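# Illustrative sketch, not part of the original module: the Newton update
# above -- d[v] -= solve(Js, sigerr) / dt -- applied to a hypothetical linear
# elastic material sig = C @ (eps + d * dt). The material model, C, and the
# prescribed components are assumptions for the demo only; with a linear
# model the iteration converges in a single step, which makes the update
# easy to verify in isolation.
def _demo_newton_update_linear():
    rng = np.random.RandomState(0)
    A = rng.rand(6, 6)
    C = A.dot(A.T) + 6. * np.eye(6)  # SPD stiffness standing in for dsig/deps
    dt, eps = 0.1, np.zeros(6)
    v = np.array([0, 1, 2])          # components with prescribed stress
    sigspec = np.array([1., 2., 3.])
    d = np.zeros(6)
    for _ in range(10):
        sig = C.dot(eps + d * dt)
        sigerr = sig[v] - sigspec
        if np.amax(np.abs(sigerr)) / max(np.amax(np.abs(sigspec)), 1.) < 1e-10:
            return d
        Js = C[np.ix_(v, v)]         # Jacobian submatrix, cf. stif[[...], v]
        d[v] -= np.linalg.solve(Js, sigerr) / dt
    return None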
def simplex(func, t, dt, temp, dtemp, f0, farg, stran, darg, sigarg,
ufield, dufield, statev_arg, v, sigspec):
'''Perform a downhill simplex search to find sym_velgrad[v] such that
sig(sym_velgrad[v]) = sigspec[v]
Parameters
----------
func : callable
Function to evaluate material
dt : float
time step
sig : ndarray
stress at beginning of step
statev_arg : ndarray
state dependent variables at beginning of step
v : ndarray
vector subscript array containing the components for which
stresses (or stress rates) are specified
sigspec : ndarray
Prescribed stress
Returns
-------
d : ndarray
the symmetric part of the velocity gradient
'''
# --- Perform the simplex search
import scipy.optimize
d = darg.copy()
f = farg.copy()
sig = sigarg.copy()
statev = copy(statev_arg)
args = (func, t, dt, temp, dtemp, f0, f, stran, d,
sig, ufield, dufield, statev, v, sigspec)
d[v] = scipy.optimize.fmin(_func, d[v], args=args, maxiter=20, disp=False)
return d
def _func(x, func, t, dt, temp, dtemp, f0, farg, stran, darg,
sigarg, ufield, dufield, statev_arg, v, sigspec):
'''Objective function to be optimized by simplex
'''
d = darg.copy()
f = farg.copy()
sig = sigarg.copy()
statev = copy(statev_arg)
# initialize
d[v] = x
fp, ep = update_deformation(f, d, dt, 0)
# store the best guesses
sig, statev, stif = func(0, t, dt, temp, dtemp, f0, fp, ep, d, sig,
ufield, dufield, statev)
# check the error
error = 0.
for i, j in enumerate(v):
error += (sig[j] - sigspec[i]) ** 2
continue
return error
def numerical_jacobian(func, time, dtime, temp, dtemp, F0, F, stran, d,
stress, ufield, dufield, statev, v):
'''Numerically compute material Jacobian by a centered difference scheme.
Parameters
----------
time : float
Time at beginning of step
dtime : float
Time step length. `time+dtime` is the time at the end of the step
temp : float
Temperature at beginning of step
dtemp : float
Temperature increment. `temp+dtemp` is the temperature at the end
of the step
F0, F : ndarray
Deformation gradient at the beginning and end of the step
strain : ndarray
Strain at the beginning of the step
d : ndarray
Symmetric part of the velocity gradient at the middle of the step
stress : ndarray
Stress at the beginning of the step
statev : ndarray
State variables at the beginning of the step
v : ndarray
Array of subcomponents of Jacobian to return
Returns
-------
Js : array_like
Jacobian of the deformation J = dsig / dE
Notes
-----
The submatrix returned is the one formed by the intersections of the
rows and columns specified in the vector subscript array, v. That is,
Js = J[v, v]. The physical array containing this submatrix is
assumed to be dimensioned Js[nv, nv], where nv is the number of
    elements in v. Note that in the special case v = [0,1,2,3,4,5], with
nv = 6, the matrix that is returned is the full Jacobian matrix, J.
The components of Js are computed numerically using a centered
differencing scheme which requires two calls to the material model
subroutine for each element of v. The centering is about the point eps
= epsold + d * dt, where d is the rate-of-strain array.
History
-------
This subroutine is a python implementation of a routine by the same
name in Tom Pucick's MMD driver.
Authors
-------
Tom Pucick, original fortran implementation in the MMD driver
    Tim Fuller, Sandia National Laboratories, [email protected]
'''
# local variables
nv = len(v)
deps = np.sqrt(np.finfo(np.float64).eps)
Jsub = np.zeros((nv, nv))
dtime = 1 if dtime < 1.e-12 else dtime
for i in range(nv):
# perturb forward
Dp = d.copy()
Dp[v[i]] = d[v[i]] + (deps / dtime) / 2.
Fp, Ep = update_deformation(F, Dp, dtime, 0)
sigp = stress.copy()
xp = copy(statev)
sigp = func(0, time, dtime, temp, dtemp, F0, Fp, Ep, Dp, sigp,
ufield, dufield, xp)[0]
# perturb backward
Dm = d.copy()
Dm[v[i]] = d[v[i]] - (deps / dtime) / 2.
Fm, Em = update_deformation(F, Dm, dtime, 0)
sigm = stress.copy()
xm = copy(statev)
sigm = func(0, time, dtime, temp, dtemp, F0, Fm, Em, Dm, sigm,
ufield, dufield, xm)[0]
# compute component of jacobian
Jsub[i, :] = (sigp[v] - sigm[v]) / deps
continue
return Jsub
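# Illustrative sketch, not part of the original module: the same centered
# differencing scheme as numerical_jacobian(), reduced to a plain function
# R^n -> R^n so it can be checked against a known analytic Jacobian. The
# quadratic test function g is an assumption made for the demo only.
def _demo_centered_difference_jacobian():
    def g(x):
        # toy residual: g_i(x) = x_i**2 + sum(x)
        return x ** 2 + np.sum(x)
    x0 = np.array([1., 2., 3.])
    h = np.sqrt(np.finfo(np.float64).eps)  # same step size as deps above
    n = x0.size
    J = np.zeros((n, n))
    for i in range(n):
        # perturb forward and backward by h/2, as numerical_jacobian() does
        xp, xm = x0.copy(), x0.copy()
        xp[i] += h / 2.
        xm[i] -= h / 2.
        J[:, i] = (g(xp) - g(xm)) / h
    J_exact = np.diag(2. * x0) + np.ones((n, n))
    assert np.allclose(J, J_exact, atol=1e-5)
    return J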
| matmodlab/matmodlab2 | matmodlab2/core/stress_control.py | Python | bsd-3-clause | 12,427 |
#!/usr/bin/env python
"""
@package mi.core.instrument.wrapper
@file mi/core/instrument/wrapper.py
@author Peter Cable
@brief Driver process using ZMQ messaging.
Usage:
run_driver <module> <driver_class> <refdes> <event_url> <particle_url>
run_driver <module> <driver_class> <refdes> <event_url> <particle_url> <config_file>
Options:
-h, --help Show this screen.
"""
import base64
import importlib
import json
import os
import signal
import threading
import time
import yaml
import zmq
from docopt import docopt
from logging import _levelNames
from mi.core.common import BaseEnum
from mi.core.exceptions import UnexpectedError, InstrumentCommandException, InstrumentException
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.publisher import Publisher
from mi.core.log import get_logger, get_logging_metaclass
from mi.core.service_registry import ConsulServiceRegistry
log = get_logger()
META_LOGGER = get_logging_metaclass('trace')
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
class Commands(BaseEnum):
STOP_DRIVER = 'stop_driver_process'
TEST_EVENTS = 'test_events'
PING = 'process_echo'
OVERALL_STATE = 'overall_state'
STOP_WORKER = 'stop_worker'
DEFAULT = 'default'
SET_LOG_LEVEL = 'set_log_level'
class EventKeys(BaseEnum):
TIME = 'time'
TYPE = 'type'
VALUE = 'value'
COMMAND = 'cmd'
ARGS = 'args'
KWARGS = 'kwargs'
# semaphore to prevent multiple simultaneous commands into the driver
COMMAND_SEM = threading.BoundedSemaphore(1)
def encode_exception(exception):
if not isinstance(exception, InstrumentException):
exception = UnexpectedError("%s('%s')" % (exception.__class__.__name__, exception.message))
return exception.get_triple()
def _decode(data):
if isinstance(data, (list, tuple)):
return [_decode(x) for x in data]
if isinstance(data, dict):
return {_decode(k): _decode(v) for k, v in data.iteritems()}
if isinstance(data, basestring):
return data.decode('utf-8', 'ignore')
return data
def _transform(value):
flag = '_base64:'
if isinstance(value, basestring):
if value.startswith(flag):
data = value.split(flag, 1)[1]
return base64.b64decode(data)
return value
elif isinstance(value, (list, tuple)):
return [_transform(x) for x in value]
elif isinstance(value, dict):
return {k: _transform(value[k]) for k in value}
return value
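# Quick sketch of the convention handled by _transform() above (inferred from
# the code, written with this module's Python 2 idioms): raw bytes may be
# shipped inside JSON-safe payloads as '_base64:<encoded>' strings, and
# _transform() recursively restores them.
def _demo_transform_base64():
    wrapped = '_base64:' + base64.b64encode(b'\x00\xff\x10')
    assert _transform({'cmd': wrapped}) == {'cmd': b'\x00\xff\x10'}
    return _transform([wrapped, 'plain text'])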
def build_event(event_type, value, command=None, args=None, kwargs=None):
event = {
EventKeys.TIME: time.time(),
EventKeys.TYPE: event_type,
EventKeys.VALUE: value
}
if any((command, args, kwargs)):
event[EventKeys.COMMAND] = {
EventKeys.COMMAND: command,
EventKeys.ARGS: args,
EventKeys.KWARGS: kwargs
}
return event
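# Minimal usage sketch (an assumption, mirroring how CommandHandler._execute
# below calls build_event): a bare event carries only time/type/value, and
# the command echo block is attached only when command details are supplied.
def _demo_build_event():
    bare = build_event(DriverAsyncEvent.RESULT, 'ok')
    echoed = build_event(DriverAsyncEvent.RESULT, 'ok',
                         command=Commands.PING, args=(), kwargs={})
    assert EventKeys.COMMAND not in bare
    assert echoed[EventKeys.COMMAND][EventKeys.COMMAND] == Commands.PING
    return bare, echoed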
class CommandHandler(threading.Thread):
def __init__(self, wrapper, worker_url):
super(CommandHandler, self).__init__()
self.wrapper = wrapper
self.driver = wrapper.driver
self.send_event = wrapper.send_event
self.worker_url = worker_url
self._stop = False
self._routes = {
Commands.SET_LOG_LEVEL: self._set_log_level,
Commands.OVERALL_STATE: self._overall_state,
Commands.PING: self._ping,
Commands.TEST_EVENTS: self._test_events,
Commands.STOP_DRIVER: self._stop_driver,
Commands.STOP_WORKER: self._stop_worker,
}
def _execute(self, raw_command, raw_args, raw_kwargs):
# check for b64 encoded values
# decode them prior to processing this command
command = _transform(raw_command)
args = _transform(raw_args)
kwargs = _transform(raw_kwargs)
# lookup the function to be executed
_func = self._routes.get(command, self._send_command)
# ensure args is iterable
if not isinstance(args, (list, tuple)):
args = (args,)
# Attempt to execute this command
try:
reply = _func(command, *args, **kwargs)
event_type = DriverAsyncEvent.RESULT
except Exception as e:
log.error('Exception in command handler: %r', e)
reply = encode_exception(e)
event_type = DriverAsyncEvent.ERROR
# Build the response event. Use the raw values: if something was
# base64 encoded, we may not be able to send the decoded value back raw.
event = build_event(event_type, reply, raw_command, raw_args, raw_kwargs)
log.trace('CommandHandler generated event: %r', event)
return event
def _set_log_level(self, *args, **kwargs):
level_name = kwargs.get('level')
level = None
if isinstance(level_name, int):
if level_name in _levelNames:
level = level_name
elif isinstance(level_name, basestring):
level_name = level_name.upper()
level = _levelNames.get(level_name)
if level is None:
raise UnexpectedError('Invalid logging level supplied')
log.setLevel(level)
return 'Set logging level to %s' % level
def _test_events(self, *args, **kwargs):
events = kwargs['events']
if not isinstance(events, (list, tuple)):
events = [events]
for e in events:
self.send_event(e)
return 'Enqueued test events'
def _stop_driver(self, *args, **kwargs):
self.wrapper.stop_messaging()
return 'Stopped driver process'
def _stop_worker(self, *args, **kwargs):
self._stop = True
return 'Stopping worker thread'
def _ping(self, *args, **kwargs):
return 'ping from wrapper pid:%s, resource:%s' % (os.getpid(), self.driver)
def _overall_state(self, *args, **kwargs):
direct_config = {}
if hasattr(self.driver, 'get_direct_config'):
direct_config = self.driver.get_direct_config()
return {'capabilities': self.driver.get_resource_capabilities(),
'state': self.driver.get_resource_state(),
'metadata': self.driver.get_config_metadata(),
'parameters': self.driver.get_cached_config(),
'direct_config': direct_config,
'init_params': self.driver.get_init_params()}
def _send_command(self, command, *args, **kwargs):
if not COMMAND_SEM.acquire(False):
return 'BUSY'
try:
cmd_func = getattr(self.driver, command, None)
if cmd_func and callable(cmd_func):
reply = cmd_func(*args, **kwargs)
else:
raise InstrumentCommandException('Unknown driver command.')
return reply
finally:
COMMAND_SEM.release()
def cmd_driver(self, msg):
"""
This method should NEVER throw an exception, as this will break the event loop
"""
log.debug('executing command: %s', msg)
command = msg.get(EventKeys.COMMAND, '')
args = msg.get(EventKeys.ARGS, ())
kwargs = msg.get(EventKeys.KWARGS, {})
return self._execute(command, args, kwargs)
def run(self):
"""
Await commands on a ZMQ REP socket, forwarding them to the
driver for processing and returning the result.
"""
context = zmq.Context.instance()
sock = context.socket(zmq.REQ)
sock.connect(self.worker_url)
sock.send('READY')
address = None
while not self._stop:
try:
address, _, request = sock.recv_multipart()
msg = json.loads(request)
log.info('received message: %r', msg)
reply = _decode(self.cmd_driver(msg))
sock.send_multipart([address, '', json.dumps(reply)])
except zmq.ContextTerminated:
log.info('ZMQ Context terminated, exiting worker thread')
break
except zmq.ZMQError:
# If we have an error on the socket we'll need to restart it
sock = context.socket(zmq.REQ)
sock.connect(self.worker_url)
sock.send('READY')
except Exception as e:
log.error('Exception in command loop: %r', e)
if address is not None:
event = build_event(DriverAsyncEvent.ERROR, repr(e))
sock.send_multipart([address, '', json.dumps(event)])
sock.close()
class LoadBalancer(object):
"""
The load balancer creates two router connections.
Workers and clients create REQ sockets to connect. A worker will
send 'READY' upon initialization and subsequent "requests" will be
the results from the previous command.
"""
def __init__(self, wrapper, num_workers, worker_url='inproc://workers'):
self.wrapper = wrapper
self.num_workers = num_workers
self.worker_url = worker_url
self.context = zmq.Context.instance()
self.frontend = self.context.socket(zmq.ROUTER)
self.backend = self.context.socket(zmq.ROUTER)
self.port = self.frontend.bind_to_random_port('tcp://*')
self.backend.bind(worker_url)
self._start_workers()
self.running = True
def run(self):
workers = []
poller = zmq.Poller()
poller.register(self.backend, zmq.POLLIN)
while self.running:
try:
sockets = dict(poller.poll(100))
if self.backend in sockets:
request = self.backend.recv_multipart()
worker, _, client = request[:3]
if not workers:
poller.register(self.frontend, zmq.POLLIN)
workers.append(worker)
if client != 'READY' and len(request) > 3:
_, reply = request[3:]
self.frontend.send_multipart([client, '', reply])
if self.frontend in sockets:
client, _, request = self.frontend.recv_multipart()
worker = workers.pop(0)
self.backend.send_multipart([worker, '', client, '', request])
if not workers:
poller.unregister(self.frontend)
except zmq.ContextTerminated:
log.info('ZMQ Context terminated, exiting load balancer loop')
break
def _start_workers(self):
for _ in xrange(self.num_workers):
t = CommandHandler(self.wrapper, self.worker_url)
t.setDaemon(True)
t.start()
def stop(self):
self.running = False
class DriverWrapper(object):
"""
Base class for messaging enabled OS-level driver processes. Provides
run loop, dynamic driver import and construction and interface
for messaging implementation subclasses.
"""
__metaclass__ = META_LOGGER
worker_url = "inproc://workers"
num_workers = 5
def __init__(self, driver_module, driver_class, refdes, event_url, particle_url, init_params):
"""
@param driver_module The python module containing the driver code.
@param driver_class The python driver class.
"""
self.driver_module = driver_module
self.driver_class = driver_class
self.refdes = refdes
self.event_url = event_url
self.particle_url = particle_url
self.driver = None
self.messaging_started = False
self.int_time = 0
self.port = None
self.init_params = init_params
self.load_balancer = None
self.status_thread = None
self.particle_count = 0
self.version = self.get_version(driver_module)
headers = {'sensor': self.refdes, 'deliveryType': 'streamed', 'version': self.version, 'module': driver_module}
log.info('Publish headers set to: %r', headers)
self.event_publisher = Publisher.from_url(self.event_url, headers)
self.particle_publisher = Publisher.from_url(self.particle_url, headers)
@staticmethod
def get_version(driver_module):
module = importlib.import_module(driver_module)
dirname = os.path.dirname(module.__file__)
metadata_file = os.path.join(dirname, 'metadata.yml')
if os.path.exists(metadata_file):
with open(metadata_file) as f:
    metadata = yaml.load(f)
return metadata.get('driver_metadata', {}).get('version')
return 'UNVERSIONED'
def construct_driver(self):
"""
Attempt to import and construct the driver object based on
configuration.
@retval True if successful, False otherwise.
"""
module = importlib.import_module(self.driver_module)
driver_class = getattr(module, self.driver_class)
self.driver = driver_class(self.send_event, self.refdes)
self.driver.set_init_params(self.init_params)
log.info('Imported and created driver from module: %r class: %r driver: %r refdes: %r',
module, driver_class, self.driver, self.refdes)
return True
def send_event(self, evt):
"""
Append an event to the list to be sent by the event thread.
"""
if isinstance(evt[EventKeys.VALUE], Exception):
evt[EventKeys.VALUE] = encode_exception(evt[EventKeys.VALUE])
if evt[EventKeys.TYPE] == DriverAsyncEvent.ERROR:
log.error(evt)
if evt[EventKeys.TYPE] == DriverAsyncEvent.SAMPLE:
if evt[EventKeys.VALUE].get('stream_name') == 'raw':
# don't publish raw
return
self.particle_publisher.enqueue(evt)
else:
self.event_publisher.enqueue(evt)
def run(self):
"""
Process entry point. Construct the driver and start the messaging
loops, blocking until they terminate.
"""
log.info('Driver process started.')
# noinspection PyUnusedLocal
def shand(signum, frame):
self.stop_messaging()
signal.signal(signal.SIGINT, shand)
if self.driver is not None or self.construct_driver():
self.start_threads()
def start_threads(self):
"""
Initialize and start messaging resources for the driver, blocking
until messaging terminates. This ZMQ implementation starts the event
and particle publishers, spins up the load balancer with its pool of
command-handler worker threads, and runs the balancer loop until the
stop flag is set.
"""
self.event_publisher.start()
self.particle_publisher.start()
self.load_balancer = LoadBalancer(self, self.num_workers)
self.port = self.load_balancer.port
# now that we have a port, start our status thread
self.status_thread = ConsulServiceRegistry.create_health_thread(self.refdes, self.port)
self.status_thread.setDaemon(True)
self.status_thread.start()
self.load_balancer.run()
def stop_messaging(self):
"""
Close messaging resources for the driver. Set flags to cause
command and event threads to close sockets and conclude.
"""
self.load_balancer.stop()
self.status_thread.stop()
self.event_publisher.stop()
self.particle_publisher.stop()
def main():
options = docopt(__doc__)
module = options['<module>']
event_url = options['<event_url>']
particle_url = options['<particle_url>']
klass = options.get('<driver_class>')
refdes = options['<refdes>']
config_file = options['<config_file>']
if config_file is not None:
with open(config_file) as f:
    init_params = yaml.load(f)
else:
init_params = {}
wrapper = DriverWrapper(module, klass, refdes, event_url, particle_url, init_params)
wrapper.run()
if __name__ == '__main__':
main()
| janeen666/mi-instrument | mi/core/instrument/wrapper.py | Python | bsd-2-clause | 16,160 |
# data.world-py
# Copyright 2017 data.world, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at
# data.world, Inc.(http://data.world/).
import configparser
import os
import re
import tempfile
from os import path
import six
class DefaultConfig(object):
"""Base class for configuration objects
This class defines default values and establishes the contract for
sub-classes.
Attributes
----------
auth_token : str
API token for access to data.world.
tmp_dir : str
Path of temporary directory, where temporary files are created.
cache_dir : str
Path of dataset cache directory, where downloaded datasets are saved.
"""
def __init__(self):
self._auth_token = None
self._tmp_dir = path.expanduser(tempfile.gettempdir())
self._cache_dir = path.expanduser('~/.dw/cache')
@property
def auth_token(self):
return self._auth_token
@property
def cache_dir(self):
return self._cache_dir
@property
def tmp_dir(self):
return self._tmp_dir
class EnvConfig(DefaultConfig):
"""Environment-based configuration
This class will obtain configuration parameters from environment variables:
- DW_AUTH_TOKEN
- DW_CACHE_DIR
- DW_TMP_DIR
"""
def __init__(self):
super(EnvConfig, self).__init__()
self._auth_token = os.environ.get('DW_AUTH_TOKEN')
self._cache_dir = os.environ.get('DW_CACHE_DIR')
self._tmp_dir = os.environ.get('DW_TMP_DIR')
class FileConfig(DefaultConfig):
"""File-based configuration
This class allows configuration to be saved to and obtained from
data.world's configuration file.
Multiple configurations profiles can be saved in the same file and are
identified by their profile name.
:param profile: Name of configuration profile.
:type profile: str
"""
def __init__(self, profile='default', **kwargs):
super(FileConfig, self).__init__()
# Overrides, for testing
self._config_file_path = path.expanduser(
kwargs.get('config_file_path', '~/.dw/config'))
legacy_file_path = path.expanduser(
kwargs.get('legacy_file_path', '~/.data.world'))
if not path.isdir(path.dirname(self._config_file_path)):
os.makedirs(path.dirname(self._config_file_path))
self._config_parser = (configparser.ConfigParser()
if six.PY3 else configparser.SafeConfigParser())
if path.isfile(self._config_file_path):
with open(self._config_file_path) as config_file:
    self._config_parser.read_file(config_file)
if self.__migrate_invalid_defaults(self._config_parser) > 0:
self.save()
elif path.isfile(legacy_file_path):
self._config_parser = self.__migrate_config(legacy_file_path)
self.save()
self._profile = profile
self._section = (profile
if profile.lower() != configparser.DEFAULTSECT.lower()
else configparser.DEFAULTSECT)
if not path.isdir(path.dirname(self.cache_dir)):
os.makedirs(path.dirname(self.cache_dir))
@property
def auth_token(self):
self.__validate_config()
return self._config_parser.get(self._section, 'auth_token')
@auth_token.setter
def auth_token(self, auth_token):
"""
:param auth_token: API token for access to data.world
"""
if (self._section != configparser.DEFAULTSECT and
not self._config_parser.has_section(self._section)):
self._config_parser.add_section(self._section)
self._config_parser.set(self._section, 'auth_token', auth_token)
def save(self):
"""Persist config changes"""
with open(self._config_file_path, 'w') as file:
self._config_parser.write(file)
def __validate_config(self):
if not path.isfile(self._config_file_path):
raise RuntimeError(
'Configuration file not found at {}. '
'To fix this issue, run dw configure'.format(
self._config_file_path))
if not self._config_parser.has_option(self._section, 'auth_token'):
raise RuntimeError(
'The {0} profile is not properly configured. '
'To fix this issue, run dw -p {0} configure'.format(
self._profile))
@staticmethod
def __migrate_config(legacy_file_path):
config_parser = configparser.ConfigParser()
with open(legacy_file_path, 'r') as legacy:
regex = re.compile(r"^token\s*=\s*(\S.*)$")
token = next(iter(
[regex.match(line.strip()).group(1) for line in legacy if
regex.match(line)]),
None)
if token is not None:
config_parser[configparser.DEFAULTSECT] = {'auth_token': token}
# Will leave legacy in case R SDK may still need it
# os.remove(legacy_file_path)
return config_parser
@staticmethod
def __migrate_invalid_defaults(config_parser):
# This fixes an issue related to us having referred to the default
# section in the config file as 'default' as opposed to using
# configparser.DEFAULTSECT
# That may result in 'ValueError: Invalid section name: default'
# https://github.com/datadotworld/data.world-py/issues/18
invalid_defaults = []
for section in config_parser.sections():
# Doesn't include DEFAULTSECT, but checking nonetheless
if (section != configparser.DEFAULTSECT and
section.lower() == configparser.DEFAULTSECT.lower()):
invalid_defaults.append(section)
if len(invalid_defaults) == 1:
old_default = invalid_defaults[0]
config_parser[configparser.DEFAULTSECT] = {
option: config_parser.get(old_default, option)
for option in config_parser.options(old_default)}
for section in invalid_defaults:
config_parser.remove_section(section)
return len(invalid_defaults)
class ChainedConfig(DefaultConfig):
"""Checks for env config first, then file-based config"""
def __init__(self, **kwargs):
# Overrides (for testing)
self._config_chain = kwargs.get('config_chain',
[EnvConfig(), FileConfig()])
def __getattribute__(self, item):
"""Delegates requests to config objects in the chain
"""
return object.__getattribute__(self, '_first_not_none')(
object.__getattribute__(self, '_config_chain'),
lambda c: c.__getattribute__(item))
@staticmethod
def _first_not_none(seq, supplier_func):
"""Applies supplier_func to each element in seq, returns 1st not None
:param seq: Sequence of object
:type seq: iterable
:param supplier_func: Function that extracts the desired value from
elements in seq
:type supplier_func: function
"""
for i in seq:
obj = supplier_func(i)
if obj is not None:
return obj
return None
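# Resolution example (illustrative): with DW_AUTH_TOKEN exported in the
# environment, ChainedConfig().auth_token returns the environment value;
# otherwise the lookup falls through to the token stored in ~/.dw/config
# by `dw configure`:
#
#     config = ChainedConfig()
#     token = config.auth_token  # EnvConfig first, then FileConfig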
class InlineConfig(DefaultConfig):
def __init__(self, token):
super(InlineConfig, self).__init__()
self._auth_token = token
| datadotworld/data.world-py | datadotworld/config.py | Python | apache-2.0 | 7,935 |
import asynctnt
from .tube import Tube
__all__ = (
'Queue',
)
class Queue:
__slots__ = (
'_conn', '_tube_cls', '_tubes', '_namespace'
)
def __init__(self,
conn: asynctnt.Connection,
tube_cls=Tube,
namespace='queue'):
"""
Queue constructor.
:param conn:
asynctnt connection (see
`asynctnt <https://github.com/igorcoding/asynctnt>`__
documentation)
:param tube_cls:
Tube class that is used for Tube creation (default is
:class:`asynctnt_queue.Tube`)
:param namespace:
Name under which the queue module is exposed on the Tarantool
server (default is `queue`)
"""
assert isinstance(conn, asynctnt.Connection), \
'conn must be asynctnt.Connection instance'
self._conn = conn
self._tube_cls = tube_cls
self._tubes = {}
self._namespace = namespace
@property
def conn(self):
"""
``asynctnt`` connection
:returns: :class:`asynctnt.Connection` instance
"""
return self._conn
@property
def namespace(self):
"""
Queues namespace
:returns: :class:`str` instance
"""
return self._namespace
def tube(self, name):
"""
Returns tube by its name
:param name: Tube name
:returns: ``self.tube_cls`` instance
(by default :class:`asynctnt_queue.Tube`)
"""
if name in self._tubes:
return self._tubes[name]
assert name, 'Tube name must be specified'
t = self._tube_cls(self, name)
self._tubes[name] = t
return t
async def statistics(self, tube_name=None):
"""
Returns queue statistics (coroutine)
:param tube_name:
If specified, statistics by a specific tube is returned,
else statistics about all tubes is returned
"""
args = None
if tube_name is not None:
args = (tube_name,)
res = await self._conn.call('{}.statistics'.format(self._namespace), args)
if self._conn.version < (1, 7): # pragma: nocover
return res.body[0][0]
return res.body[0]
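# Minimal usage sketch (host, port and tube name are illustrative):
#
#     import asyncio
#     import asynctnt
#     from asynctnt_queue import Queue
#
#     async def main():
#         conn = asynctnt.Connection(host='127.0.0.1', port=3301)
#         await conn.connect()
#         queue = Queue(conn)
#         tube = queue.tube('jobs')
#         print(await queue.statistics('jobs'))
#         await conn.disconnect()
#
#     asyncio.get_event_loop().run_until_complete(main())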
| igorcoding/asynctnt-queue | asynctnt_queue/queue.py | Python | apache-2.0 | 2,395 |
from google.appengine.ext import ndb
class Dresseur(ndb.Model):
tirage = ndb.KeyProperty(kind='Tirage')
nomig = ndb.StringProperty()
codeami = ndb.StringProperty()
| crancerkill/pokeploud | server/DresseurModele.py | Python | apache-2.0 | 177 |
# -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing the builtin documentation generator.
The different parts of the module document are assembled from the parsed
Python file. The appearance is determined by several templates defined within
this module.
"""
from __future__ import unicode_literals
import sys
import re
from Utilities import html_uencode
from Utilities.ModuleParser import RB_SOURCE, Function
_signal = re.compile(
r"""
^@signal [ \t]+
(?P<SignalName1>
[a-zA-Z_] \w* [ \t]* \( [^)]* \)
)
[ \t]* (?P<SignalDescription1> .*)
|
^@signal [ \t]+
(?P<SignalName2>
[a-zA-Z_] \w*
)
[ \t]+ (?P<SignalDescription2> .*)
""", re.VERBOSE | re.DOTALL | re.MULTILINE).search
_event = re.compile(
r"""
^@event [ \t]+
(?P<EventName1>
[a-zA-Z_] \w* [ \t]* \( [^)]* \)
)
[ \t]* (?P<EventDescription1> .*)
|
^@event [ \t]+
(?P<EventName2>
[a-zA-Z_] \w*
)
[ \t]+ (?P<EventDescription2> .*)
""", re.VERBOSE | re.DOTALL | re.MULTILINE).search
class TagError(Exception):
"""
Exception class raised, if an invalid documentation tag was found.
"""
pass
class ModuleDocument(object):
"""
Class implementing the builtin documentation generator.
"""
def __init__(self, module, colors, stylesheet=None):
"""
Constructor
@param module the information of the parsed Python file
@param colors dictionary specifying the various colors for the output
(dictionary of strings)
@param stylesheet the style to be used for the generated pages (string)
"""
self.module = module
self.empty = True
self.stylesheet = stylesheet
if self.stylesheet:
from . import TemplatesListsStyleCSS
self.headerTemplate = TemplatesListsStyleCSS.headerTemplate
self.footerTemplate = TemplatesListsStyleCSS.footerTemplate
self.moduleTemplate = TemplatesListsStyleCSS.moduleTemplate
self.rbFileTemplate = TemplatesListsStyleCSS.rbFileTemplate
self.classTemplate = TemplatesListsStyleCSS.classTemplate
self.methodTemplate = TemplatesListsStyleCSS.methodTemplate
self.constructorTemplate = \
TemplatesListsStyleCSS.constructorTemplate
self.rbModuleTemplate = TemplatesListsStyleCSS.rbModuleTemplate
self.rbModulesClassTemplate = \
TemplatesListsStyleCSS.rbModulesClassTemplate
self.functionTemplate = TemplatesListsStyleCSS.functionTemplate
self.listTemplate = TemplatesListsStyleCSS.listTemplate
self.listEntryTemplate = TemplatesListsStyleCSS.listEntryTemplate
self.listEntryNoneTemplate = \
TemplatesListsStyleCSS.listEntryNoneTemplate
self.listEntryDeprecatedTemplate = \
TemplatesListsStyleCSS.listEntryDeprecatedTemplate
self.listEntrySimpleTemplate = \
TemplatesListsStyleCSS.listEntrySimpleTemplate
self.paragraphTemplate = TemplatesListsStyleCSS.paragraphTemplate
self.parametersListTemplate = \
TemplatesListsStyleCSS.parametersListTemplate
self.parametersListEntryTemplate = \
TemplatesListsStyleCSS.parametersListEntryTemplate
self.returnsTemplate = TemplatesListsStyleCSS.returnsTemplate
self.exceptionsListTemplate = \
TemplatesListsStyleCSS.exceptionsListTemplate
self.exceptionsListEntryTemplate = \
TemplatesListsStyleCSS.exceptionsListEntryTemplate
self.signalsListTemplate = \
TemplatesListsStyleCSS.signalsListTemplate
self.signalsListEntryTemplate = \
TemplatesListsStyleCSS.signalsListEntryTemplate
self.eventsListTemplate = TemplatesListsStyleCSS.eventsListTemplate
self.eventsListEntryTemplate = \
TemplatesListsStyleCSS.eventsListEntryTemplate
self.deprecatedTemplate = TemplatesListsStyleCSS.deprecatedTemplate
self.authorInfoTemplate = TemplatesListsStyleCSS.authorInfoTemplate
self.seeListTemplate = TemplatesListsStyleCSS.seeListTemplate
self.seeListEntryTemplate = \
TemplatesListsStyleCSS.seeListEntryTemplate
self.seeLinkTemplate = TemplatesListsStyleCSS.seeLinkTemplate
self.sinceInfoTemplate = TemplatesListsStyleCSS.sinceInfoTemplate
else:
from . import TemplatesListsStyle
self.headerTemplate = \
TemplatesListsStyle.headerTemplate.format(**colors)
self.footerTemplate = \
TemplatesListsStyle.footerTemplate.format(**colors)
self.moduleTemplate = \
TemplatesListsStyle.moduleTemplate.format(**colors)
self.rbFileTemplate = \
TemplatesListsStyle.rbFileTemplate.format(**colors)
self.classTemplate = \
TemplatesListsStyle.classTemplate.format(**colors)
self.methodTemplate = \
TemplatesListsStyle.methodTemplate.format(**colors)
self.constructorTemplate = \
TemplatesListsStyle.constructorTemplate.format(**colors)
self.rbModuleTemplate = \
TemplatesListsStyle.rbModuleTemplate.format(**colors)
self.rbModulesClassTemplate = \
TemplatesListsStyle.rbModulesClassTemplate.format(**colors)
self.functionTemplate = \
TemplatesListsStyle.functionTemplate.format(**colors)
self.listTemplate = \
TemplatesListsStyle.listTemplate.format(**colors)
self.listEntryTemplate = \
TemplatesListsStyle.listEntryTemplate.format(**colors)
self.listEntryNoneTemplate = \
TemplatesListsStyle.listEntryNoneTemplate.format(**colors)
self.listEntryDeprecatedTemplate = \
TemplatesListsStyle.listEntryDeprecatedTemplate.format(
**colors)
self.listEntrySimpleTemplate = \
TemplatesListsStyle.listEntrySimpleTemplate.format(**colors)
self.paragraphTemplate = \
TemplatesListsStyle.paragraphTemplate.format(**colors)
self.parametersListTemplate = \
TemplatesListsStyle.parametersListTemplate.format(**colors)
self.parametersListEntryTemplate = \
TemplatesListsStyle.parametersListEntryTemplate.format(
**colors)
self.returnsTemplate = \
TemplatesListsStyle.returnsTemplate.format(**colors)
self.exceptionsListTemplate = \
TemplatesListsStyle.exceptionsListTemplate.format(**colors)
self.exceptionsListEntryTemplate = \
TemplatesListsStyle.exceptionsListEntryTemplate.format(
**colors)
self.signalsListTemplate = \
TemplatesListsStyle.signalsListTemplate.format(**colors)
self.signalsListEntryTemplate = \
TemplatesListsStyle.signalsListEntryTemplate.format(**colors)
self.eventsListTemplate = \
TemplatesListsStyle.eventsListTemplate.format(**colors)
self.eventsListEntryTemplate = \
TemplatesListsStyle.eventsListEntryTemplate.format(**colors)
self.deprecatedTemplate = \
TemplatesListsStyle.deprecatedTemplate.format(**colors)
self.authorInfoTemplate = \
TemplatesListsStyle.authorInfoTemplate.format(**colors)
self.seeListTemplate = \
TemplatesListsStyle.seeListTemplate.format(**colors)
self.seeListEntryTemplate = \
TemplatesListsStyle.seeListEntryTemplate.format(**colors)
self.seeLinkTemplate = \
TemplatesListsStyle.seeLinkTemplate.format(**colors)
self.sinceInfoTemplate = \
TemplatesListsStyle.sinceInfoTemplate.format(**colors)
self.keywords = []
# list of tuples containing the name (string) and
# the ref (string). The ref is without the filename part.
self.generated = False
def isEmpty(self):
"""
Public method to determine, if the module contains any classes or
functions.
@return Flag indicating an empty module (i.e. __init__.py without
any contents)
"""
return self.empty
def name(self):
"""
Public method used to get the module name.
@return The name of the module. (string)
"""
return self.module.name
def description(self):
"""
Public method used to get the description of the module.
@return The description of the module. (string)
"""
return self.__formatDescription(self.module.description)
def shortDescription(self):
"""
Public method used to get the short description of the module.
The short description is just the first line of the modules
description.
@return The short description of the module. (string)
"""
return self.__getShortDescription(self.module.description)
def genDocument(self):
"""
Public method to generate the source code documentation.
@return The source code documentation. (string)
"""
doc = self.headerTemplate.format(
**{'Title': self.module.name,
'Style': self.stylesheet}) + \
self.__genModuleSection() + \
self.footerTemplate
self.generated = True
return doc
def __genModuleSection(self):
"""
Private method to generate the body of the document.
@return The body of the document. (string)
"""
globalsList = self.__genGlobalsListSection()
classList = self.__genClassListSection()
functionList = self.__genFunctionListSection()
try:
if self.module.type == RB_SOURCE:
rbModulesList = self.__genRbModulesListSection()
modBody = self.rbFileTemplate.format(
**{'Module': self.module.name,
'ModuleDescription':
self.__formatDescription(self.module.description),
'GlobalsList': globalsList,
'ClassList': classList,
'RbModulesList': rbModulesList,
'FunctionList': functionList,
})
else:
modBody = self.moduleTemplate.format(
**{'Module': self.module.name,
'ModuleDescription':
self.__formatDescription(self.module.description),
'GlobalsList': globalsList,
'ClassList': classList,
'FunctionList': functionList,
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of module {0}.\n".format(
self.module.name))
sys.stderr.write("{0}\n".format(e))
return ""
classesSection = self.__genClassesSection()
functionsSection = self.__genFunctionsSection()
if self.module.type == RB_SOURCE:
rbModulesSection = self.__genRbModulesSection()
else:
rbModulesSection = ""
return "{0}{1}{2}{3}".format(
modBody, classesSection, rbModulesSection, functionsSection)
def __genListSection(self, names, dict, kwSuffix=""):
"""
Private method to generate a list section of the document.
@param names The names to appear in the list. (list of strings)
@param dict A dictionary containing all relevant information.
@param kwSuffix suffix to be used for the QtHelp keywords (string)
@return The list section. (string)
"""
lst = []
for name in names:
lst.append(self.listEntryTemplate.format(
**{'Link': "{0}".format(name),
'Name': dict[name].name,
'Description':
self.__getShortDescription(dict[name].description),
'Deprecated':
self.__checkDeprecated(dict[name].description) and
self.listEntryDeprecatedTemplate or "",
}))
if kwSuffix:
n = "{0} ({1})".format(name, kwSuffix)
else:
n = "{0}".format(name)
self.keywords.append((n, "#{0}".format(name)))
return ''.join(lst)
def __genGlobalsListSection(self, class_=None):
"""
Private method to generate the section listing all global attributes of
the module or class.
@param class_ reference to a class object (Class)
@return The globals list section. (string)
"""
attrNames = []
if class_ is not None:
scope = class_
else:
scope = self.module
attrNames = sorted([attr for attr in scope.globals.keys()
if not scope.globals[attr].isSignal])
if attrNames:
s = ''.join(
[self.listEntrySimpleTemplate.format(**{'Name': name})
for name in attrNames])
else:
s = self.listEntryNoneTemplate
return self.listTemplate.format(**{'Entries': s})
def __genClassListSection(self):
"""
Private method to generate the section listing all classes of the
module.
@return The classes list section. (string)
"""
names = sorted(list(self.module.classes.keys()))
if names:
self.empty = False
s = self.__genListSection(names, self.module.classes)
else:
s = self.listEntryNoneTemplate
return self.listTemplate.format(**{'Entries': s})
def __genRbModulesListSection(self):
"""
Private method to generate the section listing all modules of the file
(Ruby only).
@return The modules list section. (string)
"""
names = sorted(list(self.module.modules.keys()))
if names:
self.empty = False
s = self.__genListSection(names, self.module.modules)
else:
s = self.listEntryNoneTemplate
return self.listTemplate.format(**{'Entries': s})
def __genFunctionListSection(self):
"""
Private method to generate the section listing all functions of the
module.
@return The functions list section. (string)
"""
names = sorted(list(self.module.functions.keys()))
if names:
self.empty = False
s = self.__genListSection(names, self.module.functions)
else:
s = self.listEntryNoneTemplate
return self.listTemplate.format(**{'Entries': s})
def __genClassesSection(self):
"""
Private method to generate the document section with details about
classes.
@return The classes details section. (string)
"""
classNames = sorted(list(self.module.classes.keys()))
classes = []
for className in classNames:
_class = self.module.classes[className]
supers = _class.super
if len(supers) > 0:
supers = ', '.join(supers)
else:
supers = 'None'
globalsList = self.__genGlobalsListSection(_class)
classMethList, classMethBodies = \
self.__genMethodSection(_class, className, Function.Class)
methList, methBodies = \
self.__genMethodSection(_class, className, Function.General)
staticMethList, staticMethBodies = \
self.__genMethodSection(_class, className, Function.Static)
try:
clsBody = self.classTemplate.format(
**{'Anchor': className,
'Class': _class.name,
'ClassSuper': supers,
'ClassDescription':
self.__formatDescription(_class.description),
'GlobalsList': globalsList,
'ClassMethodList': classMethList,
'MethodList': methList,
'StaticMethodList': staticMethList,
'MethodDetails':
classMethBodies + methBodies + staticMethBodies,
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of class {0}.\n".format(
className))
sys.stderr.write("{0}\n".format(e))
clsBody = ""
classes.append(clsBody)
return ''.join(classes)
def __genMethodsListSection(self, names, dict, className, clsName,
includeInit=True):
"""
Private method to generate the methods list section of a class.
@param names names to appear in the list (list of strings)
@param dict dictionary containing all relevant information
@param className class name containing the names
@param clsName visible class name containing the names
@param includeInit flag indicating to include the __init__ method
(boolean)
@return methods list section (string)
"""
lst = []
if includeInit:
try:
lst.append(self.listEntryTemplate.format(
**{'Link': "{0}.{1}".format(className, '__init__'),
'Name': clsName,
'Description': self.__getShortDescription(
dict['__init__'].description),
'Deprecated': self.__checkDeprecated(
dict['__init__'].description) and
self.listEntryDeprecatedTemplate or "",
}))
self.keywords.append(
("{0} (Constructor)".format(className),
"#{0}.{1}".format(className, '__init__')))
except KeyError:
pass
for name in names:
lst.append(self.listEntryTemplate.format(
**{'Link': "{0}.{1}".format(className, name),
'Name': dict[name].name,
'Description':
self.__getShortDescription(dict[name].description),
'Deprecated':
self.__checkDeprecated(dict[name].description) and
self.listEntryDeprecatedTemplate or "",
}))
self.keywords.append(("{0}.{1}".format(className, name),
"#{0}.{1}".format(className, name)))
return ''.join(lst)
def __genMethodSection(self, obj, className, filter):
"""
Private method to generate the method details section.
@param obj reference to the object being formatted
@param className name of the class containing the method (string)
@param filter filter value designating the method types
@return method list and method details section (tuple of two string)
"""
methList = []
methBodies = []
methods = sorted([k for k in obj.methods.keys()
if obj.methods[k].modifier == filter])
if '__init__' in methods:
methods.remove('__init__')
try:
methBody = self.constructorTemplate.format(
**{'Anchor': className,
'Class': obj.name,
'Method': '__init__',
'MethodDescription':
self.__formatDescription(
obj.methods['__init__'].description),
'Params':
', '.join(obj.methods['__init__'].parameters[1:]),
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of method {0}.{1}.\n".format(
className, '__init__'))
sys.stderr.write("{0}\n".format(e))
methBody = ""
methBodies.append(methBody)
if filter == Function.Class:
methodClassifier = " (class method)"
elif filter == Function.Static:
methodClassifier = " (static)"
else:
methodClassifier = ""
for method in methods:
try:
methBody = self.methodTemplate.format(
**{'Anchor': className,
'Class': obj.name,
'Method': obj.methods[method].name,
'MethodClassifier': methodClassifier,
'MethodDescription':
self.__formatDescription(
obj.methods[method].description),
'Params': ', '.join(obj.methods[method].parameters[1:]),
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of method {0}.{1}.\n".format(
className, method))
sys.stderr.write("{0}\n".format(e))
methBody = ""
methBodies.append(methBody)
methList = self.__genMethodsListSection(
methods, obj.methods, className, obj.name,
includeInit=filter == Function.General)
if not methList:
methList = self.listEntryNoneTemplate
return (self.listTemplate.format(**{'Entries': methList}),
''.join(methBodies))
def __genRbModulesSection(self):
"""
Private method to generate the document section with details about
Ruby modules.
@return The Ruby modules details section. (string)
"""
rbModulesNames = sorted(list(self.module.modules.keys()))
rbModules = []
for rbModuleName in rbModulesNames:
rbModule = self.module.modules[rbModuleName]
globalsList = self.__genGlobalsListSection(rbModule)
methList, methBodies = self.__genMethodSection(
rbModule, rbModuleName, Function.General)
classList, classBodies = self.__genRbModulesClassesSection(
rbModule, rbModuleName)
try:
rbmBody = self.rbModuleTemplate.format(
**{'Anchor': rbModuleName,
'Module': rbModule.name,
'ModuleDescription':
self.__formatDescription(rbModule.description),
'GlobalsList': globalsList,
'ClassesList': classList,
'ClassesDetails': classBodies,
'FunctionsList': methList,
'FunctionsDetails': methBodies,
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of Ruby module {0}.\n"
.format(rbModuleName))
sys.stderr.write("{0}\n".format(e))
rbmBody = ""
rbModules.append(rbmBody)
return ''.join(rbModules)
def __genRbModulesClassesSection(self, obj, modName):
"""
Private method to generate the Ruby module classes details section.
@param obj Reference to the object being formatted.
@param modName Name of the Ruby module containing the classes. (string)
@return The classes list and classes details section.
(tuple of two string)
"""
classNames = sorted(list(obj.classes.keys()))
classes = []
for className in classNames:
_class = obj.classes[className]
supers = _class.super
if len(supers) > 0:
supers = ', '.join(supers)
else:
supers = 'None'
methList, methBodies = \
self.__genMethodSection(_class, className, Function.General)
try:
clsBody = self.rbModulesClassTemplate.format(
**{'Anchor': className,
'Class': _class.name,
'ClassSuper': supers,
'ClassDescription':
self.__formatDescription(_class.description),
'MethodList': methList,
'MethodDetails': methBodies,
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of class {0}.\n".format(
className))
sys.stderr.write("{0}\n".format(e))
clsBody = ""
classes.append(clsBody)
classesList = self.__genRbModulesClassesListSection(
classNames, obj.classes, modName)
if not classesList:
classesList = self.listEntryNoneTemplate
return (self.listTemplate.format(**{'Entries': classesList}),
''.join(classes))
def __genRbModulesClassesListSection(self, names, dict, moduleName):
"""
Private method to generate the classes list section of a Ruby module.
@param names The names to appear in the list. (list of strings)
@param dict A dictionary containing all relevant information.
@param moduleName Name of the Ruby module containing the classes.
(string)
@return The list section. (string)
"""
lst = []
for name in names:
lst.append(self.listEntryTemplate.format(
**{'Link': "{0}.{1}".format(moduleName, name),
'Name': dict[name].name,
'Description':
self.__getShortDescription(dict[name].description),
'Deprecated':
self.__checkDeprecated(dict[name].description) and
self.listEntryDeprecatedTemplate or "",
}))
self.keywords.append(("{0}.{1}".format(moduleName, name),
"#{0}.{1}".format(moduleName, name)))
return ''.join(lst)
def __genFunctionsSection(self):
"""
Private method to generate the document section with details about
functions.
@return The functions details section. (string)
"""
funcBodies = []
funcNames = sorted(list(self.module.functions.keys()))
for funcName in funcNames:
try:
funcBody = self.functionTemplate.format(
**{'Anchor': funcName,
'Function': self.module.functions[funcName].name,
'FunctionDescription': self.__formatDescription(
self.module.functions[funcName].description),
'Params':
', '.join(self.module.functions[funcName].parameters),
})
except TagError as e:
sys.stderr.write(
"Error in tags of description of function {0}.\n".format(
funcName))
sys.stderr.write("{0}\n".format(e))
funcBody = ""
funcBodies.append(funcBody)
return ''.join(funcBodies)
def __getShortDescription(self, desc):
"""
Private method to determine the short description of an object.
The short description is just the first non empty line of the
documentation string.
@param desc The documentation string. (string)
@return The short description. (string)
"""
dlist = desc.splitlines()
sdlist = []
descfound = 0
for desc in dlist:
desc = desc.strip()
if desc:
descfound = 1
dotpos = desc.find('.')
if dotpos == -1:
sdlist.append(desc.strip())
else:
while dotpos + 1 < len(desc) and \
not desc[dotpos + 1].isspace():
# don't recognize '.' inside a number or word as
# stop condition
dotpos = desc.find('.', dotpos + 1)
if dotpos == -1:
break
if dotpos == -1:
sdlist.append(desc.strip())
else:
sdlist.append(desc[:dotpos + 1].strip())
break # break if a '.' is found
else:
if descfound:
break # break if an empty line is found
if sdlist:
return html_uencode(' '.join(sdlist))
else:
return ''
def __checkDeprecated(self, descr):
"""
Private method to check, if the object to be documented contains a
deprecated flag.
@param descr documentation string (string)
@return flag indicating the deprecation status (boolean)
"""
dlist = descr.splitlines()
for desc in dlist:
desc = desc.strip()
if desc.startswith("@deprecated"):
return True
return False
def __genParagraphs(self, lines):
"""
Private method to assemble the descriptive paragraphs of a docstring.
A paragraph is made up of a number of consecutive lines without
an intermediate empty line. Empty lines are treated as a paragraph
delimiter.
@param lines A list of individual lines. (list of strings)
@return Ready formatted paragraphs. (string)
"""
lst = []
linelist = []
for line in lines:
if line.strip():
if line == '.':
linelist.append("")
else:
linelist.append(html_uencode(line))
else:
lst.append(self.paragraphTemplate.format(
**{'Lines': '\n'.join(linelist)}))
linelist = []
if linelist:
lst.append(self.paragraphTemplate.format(
**{'Lines': '\n'.join(linelist)}))
return ''.join(lst)
def __genDescriptionListSection(self, dictionary, template):
"""
Private method to generate the list section of a description.
@param dictionary Dictionary containing the info for the
list section.
@param template The template to be used for the list. (string)
@return The list section. (string)
"""
lst = []
keys = sorted(list(dictionary.keys()))
for key in keys:
lst.append(template.format(
**{'Name': key,
'Description': html_uencode('\n'.join(dictionary[key])),
}))
return ''.join(lst)
def __genParamDescriptionListSection(self, _list, template):
"""
Private method to generate the list section of a description.
@param _list List containing the info for the
list section.
@param template The template to be used for the list. (string)
@return The list section. (string)
"""
lst = []
for name, lines in _list:
lst.append(template.format(
**{'Name': name,
'Description': html_uencode('\n'.join(lines)),
}))
return ''.join(lst)
def __formatCrossReferenceEntry(self, entry):
"""
Private method to format a cross reference entry.
This cross reference entry looks like "package.module#member label".
@param entry the entry to be formatted (string)
@return formatted entry (string)
"""
if entry.startswith('"'):
return entry
elif entry.startswith('<'):
entry = entry[3:]
else:
try:
reference, label = entry.split(None, 1)
except ValueError:
reference = entry
label = entry
try:
path, anchor = reference.split('#', 1)
except ValueError:
path = reference
anchor = ''
reference = path and "{0}.html".format(path) or ''
if anchor:
reference = "{0}#{1}".format(reference, anchor)
entry = 'href="{0}">{1}</a>'.format(reference, label)
return self.seeLinkTemplate.format(**{'Link': entry})
def __genSeeListSection(self, _list, template):
"""
Private method to generate the "see also" list section of a
description.
@param _list List containing the info for the section.
@param template The template to be used for the list. (string)
@return The list section. (string)
"""
lst = []
for seeEntry in _list:
seeEntryString = ''.join(seeEntry)
lst.append(template.format(
**{'Link': html_uencode(self.__formatCrossReferenceEntry(
seeEntryString)),
}))
return '\n'.join(lst)
def __processInlineTags(self, desc):
"""
Private method to process inline tags.
@param desc One line of the description (string)
@return processed line with inline tags expanded (string)
@exception TagError raised to indicate an invalid tag
"""
start = desc.find('{@')
while start != -1:
stop = desc.find('}', start + 2)
if stop == -1:
raise TagError("Unterminated inline tag.\n{0}".format(desc))
tagText = desc[start + 1:stop]
if tagText.startswith('@link'):
parts = tagText.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in inline tag {0}.\n{1}".format(
parts[0], desc))
formattedTag = self.__formatCrossReferenceEntry(parts[1])
desc = desc.replace("{{{0}}}".format(tagText), formattedTag)
else:
tag = tagText.split(None, 1)[0]
raise TagError(
"Unknown inline tag encountered, {0}.\n{1}".format(
tag, desc))
start = desc.find('{@')
return desc
def __formatDescription(self, descr):
"""
Private method to format the contents of the documentation string.
@param descr The contents of the documentation string. (string)
@exception TagError A tag doesn't have the correct number
of arguments.
@return The formatted contents of the documentation string. (string)
"""
if not descr:
return ""
paragraphs = []
paramList = []
returns = []
exceptionDict = {}
signalDict = {}
eventDict = {}
deprecated = []
authorInfo = []
sinceInfo = []
seeList = []
lastItem = paragraphs
inTagSection = False
dlist = descr.splitlines()
while dlist and not dlist[0]:
del dlist[0]
for ditem in dlist:
ditem = self.__processInlineTags(ditem)
desc = ditem.strip()
if desc:
if desc.startswith(("@param", "@keyparam")):
inTagSection = True
parts = desc.split(None, 2)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
paramName = parts[1]
if parts[0] == "@keyparam":
paramName += '='
try:
paramList.append([paramName, [parts[2]]])
except IndexError:
paramList.append([paramName, []])
lastItem = paramList[-1][1]
elif desc.startswith(("@return", "@ireturn")):
inTagSection = True
parts = desc.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
returns = [parts[1]]
lastItem = returns
elif desc.startswith(("@exception", "@throws", "@raise")):
inTagSection = True
parts = desc.split(None, 2)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
excName = parts[1]
try:
exceptionDict[excName] = [parts[2]]
except IndexError:
exceptionDict[excName] = []
lastItem = exceptionDict[excName]
elif desc.startswith("@signal"):
inTagSection = True
m = _signal(desc, 0)
if m is None:
raise TagError("Wrong format in @signal line.\n")
signalName = m.group("SignalName1") or m.group("SignalName2")
signalDesc = m.group("SignalDescription1") or m.group("SignalDescription2")
signalDict[signalName] = []
if signalDesc is not None:
signalDict[signalName].append(signalDesc)
lastItem = signalDict[signalName]
elif desc.startswith("@event"):
inTagSection = True
m = _event(desc, 0)
if m is None:
raise TagError("Wrong format in @event line.\n")
eventName = m.group("EventName1") or m.group("EventName2")
eventDesc = m.group("EventDescription1") or m.group("EventDescription2")
eventDict[eventName] = []
if eventDesc is not None:
eventDict[eventName].append(eventDesc)
lastItem = eventDict[eventName]
elif desc.startswith("@deprecated"):
inTagSection = True
parts = desc.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
deprecated = [parts[1]]
lastItem = deprecated
elif desc.startswith("@author"):
inTagSection = True
parts = desc.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
authorInfo = [parts[1]]
lastItem = authorInfo
elif desc.startswith("@since"):
inTagSection = True
parts = desc.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
sinceInfo = [parts[1]]
lastItem = sinceInfo
elif desc.startswith("@see"):
inTagSection = True
parts = desc.split(None, 1)
if len(parts) < 2:
raise TagError(
"Wrong format in {0} line.\n".format(parts[0]))
seeList.append([parts[1]])
lastItem = seeList[-1]
elif desc.startswith("@@"):
lastItem.append(desc[1:])
elif desc.startswith("@"):
tag = desc.split(None, 1)[0]
raise TagError(
"Unknown tag encountered, {0}.\n".format(tag))
else:
lastItem.append(ditem)
elif not inTagSection:
lastItem.append(ditem)
if paragraphs:
description = self.__genParagraphs(paragraphs)
else:
description = ""
if paramList:
parameterSect = self.parametersListTemplate.format(
**{'Parameters': self.__genParamDescriptionListSection(
paramList, self.parametersListEntryTemplate)})
else:
parameterSect = ""
if returns:
returnSect = self.returnsTemplate.format(
html_uencode('\n'.join(returns)))
else:
returnSect = ""
if exceptionDict:
exceptionSect = self.exceptionsListTemplate.format(
**{'Exceptions': self.__genDescriptionListSection(
exceptionDict, self.exceptionsListEntryTemplate)})
else:
exceptionSect = ""
if signalDict:
signalSect = self.signalsListTemplate.format(
**{'Signals': self.__genDescriptionListSection(
signalDict, self.signalsListEntryTemplate)})
else:
signalSect = ""
if eventDict:
eventSect = self.eventsListTemplate.format(
**{'Events': self.__genDescriptionListSection(
eventDict, self.eventsListEntryTemplate)})
else:
eventSect = ""
if deprecated:
deprecatedSect = self.deprecatedTemplate.format(
**{'Lines': html_uencode('\n'.join(deprecated))})
else:
deprecatedSect = ""
if authorInfo:
authorInfoSect = self.authorInfoTemplate.format(
**{'Authors': html_uencode('\n'.join(authorInfo))})
else:
authorInfoSect = ""
if sinceInfo:
sinceInfoSect = self.sinceInfoTemplate.format(
**{'Info': html_uencode(sinceInfo[0])})
else:
sinceInfoSect = ""
if seeList:
seeSect = self.seeListTemplate.format(
**{'Links': self.__genSeeListSection(
seeList, self.seeListEntryTemplate)})
else:
seeSect = ''
return "{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}".format(
deprecatedSect, description, parameterSect, returnSect,
exceptionSect, signalSect, eventSect, authorInfoSect,
seeSect, sinceInfoSect
)
def getQtHelpKeywords(self):
"""
Public method to retrieve the parts for the QtHelp keywords section.
@return list of tuples containing the name (string) and the ref
(string). The ref is without the filename part.
"""
if not self.generated:
self.genDocument()
return self.keywords
| davy39/eric | DocumentationTools/ModuleDocumentor.py | Python | gpl-3.0 | 44,668 |
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
import signal
from twisted.internet.defer import TimeoutError
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python.reflect import namedAny
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
try:
from twisted.internet import process
except ImportError:
process = None
class ReactorBuilder:
"""
L{TestCase} mixin which provides a reactor-creation API. This mixin
defines C{setUp} and C{tearDown}, so mix it in before L{TestCase} or call
its methods from the overridden ones in the subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
TestCases will be created.
"""
_reactors = ["twisted.internet.selectreactor.SelectReactor",
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor",
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.kqueuereactor.KQueueReactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor"]
reactorFactory = None
originalHandler = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if getattr(signal, 'SIGCHLD', None) is not None:
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker should become obsolete
# when bug #3063 is fixed. -radix, 2008-02-29. Fortunately it should
# probably cause an error when bug #3063 is fixed, so it should be
# removed in the same branch that fixes it.
#
# -exarkun
if getattr(reactor, 'waker', None) is not None:
reactor.removeReader(reactor.waker)
reactor.waker.connectionLost(None)
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor being
# unusable are detectable without actually instantiating the
# reactor. So we catch some more here and skip the test if
# necessary.
raise SkipTest(Failure().getErrorMessage())
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If C{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TimeoutError: If the reactor is still running after C{timeout}
seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TimeoutError(
"reactor still running after %s seconds" % (timeout,))
def makeTestCaseClasses(cls):
"""
Create a L{TestCase} subclass which mixes in C{cls} for each known
reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
class testcase(cls, TestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
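# Typical usage in a reactor test module (builder name is illustrative):
# subclass ReactorBuilder with the shared test methods, then materialize one
# TestCase per known reactor with
#
#     class ConnectionTestsBuilder(ReactorBuilder):
#         ...
#
#     globals().update(ConnectionTestsBuilder.makeTestCaseClasses())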
__all__ = ['ReactorBuilder']
| hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/twisted/internet/test/reactormixins.py | Python | apache-2.0 | 6,429 |
# -*- coding: utf-8 -*-
import xc_base
import geom
import xc
import math
import os
from model import predefined_spaces
from materials import typical_materials
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
ndivZ= 500
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.SolidMechanics3D(nodes)
# Define materials
elast= typical_materials.defElasticMaterial(preprocessor, "elast",3000)
nodes.newSeedNode()
seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.defaultMaterial= "elast"
seedElemHandler.dimElem= 3 # Dimension of element space
seedElemHandler.defaultTag= 1 #Tag for the next element.
truss= seedElemHandler.newElement("Truss",xc.ID([0,0]))
truss.area= 10.0
unifGrids= preprocessor.getMultiBlockTopology.getUniformGrids
uGrid= unifGrids.newUniformGrid()
uGrid.Lx= 1
uGrid.Ly= 1
uGrid.Lz= 10
uGrid.nDivX= 0
uGrid.nDivY= 0
uGrid.nDivZ= ndivZ
setTotal= preprocessor.getSets.getSet("total")
setTotal.genMesh(xc.meshDir.I)
numNodes= setTotal.getNodes.size
numElem= setTotal.getElements.size
numNodesTeor= ndivZ+1
numElemTeor= ndivZ
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(numNodesTeor-numNodes)<1e-15) and (abs(numElemTeor-numElem)<1e-15):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/preprocessor/cad/test_unif_grid_01.py | Python | gpl-3.0 | 1,501 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
quota_group = cfg.OptGroup(
name='quota',
title='Quota Options',
help="""
Quota options allow you to manage quotas in an OpenStack deployment.
""")
quota_opts = [
cfg.IntOpt('instances',
min=-1,
default=10,
deprecated_group='DEFAULT',
deprecated_name='quota_instances',
help="""
The number of instances allowed per project.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('cores',
min=-1,
default=20,
deprecated_group='DEFAULT',
deprecated_name='quota_cores',
help="""
The number of instance cores or vCPUs allowed per project.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('ram',
min=-1,
default=50 * 1024,
deprecated_group='DEFAULT',
deprecated_name='quota_ram',
help="""
The number of megabytes of instance RAM allowed per project.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('floating_ips',
min=-1,
default=10,
deprecated_group='DEFAULT',
deprecated_name='quota_floating_ips',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
The number of floating IPs allowed per project.
Floating IPs are not allocated to instances by default. Users need to select
them from the pool configured by the OpenStack administrator to attach to their
instances.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('fixed_ips',
min=-1,
default=-1,
deprecated_group='DEFAULT',
deprecated_name='quota_fixed_ips',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
The number of fixed IPs allowed per project.
Unlike floating IPs, fixed IPs are allocated dynamically by the network
component when instances boot up. This quota value should be at least the
number of instances allowed.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('metadata_items',
min=-1,
default=128,
deprecated_group='DEFAULT',
deprecated_name='quota_metadata_items',
help="""
The number of metadata items allowed per instance.
Users can associate metadata with an instance during instance creation. This
metadata takes the form of key-value pairs.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('injected_files',
min=-1,
default=5,
deprecated_group='DEFAULT',
deprecated_name='quota_injected_files',
help="""
The number of injected files allowed.
File injection allows users to customize the personality of an instance by
injecting data into it upon boot. Only text file injection is permitted: binary
or ZIP files are not accepted. During file injection, any existing files that
match specified files are renamed to include a ``.bak`` extension appended with
a timestamp.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('injected_file_content_bytes',
min=-1,
default=10 * 1024,
deprecated_group='DEFAULT',
deprecated_name='quota_injected_file_content_bytes',
help="""
The number of bytes allowed per injected file.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('injected_file_path_length',
min=-1,
default=255,
deprecated_group='DEFAULT',
deprecated_name='quota_injected_file_path_length',
help="""
The maximum allowed injected file path length.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('security_groups',
min=-1,
default=10,
deprecated_group='DEFAULT',
deprecated_name='quota_security_groups',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
The number of security groups per project.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('security_group_rules',
min=-1,
default=20,
deprecated_group='DEFAULT',
deprecated_name='quota_security_group_rules',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
The number of security rules per security group.
The associated rules in each security group control the traffic to instances in
the group.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('key_pairs',
min=-1,
default=100,
deprecated_group='DEFAULT',
deprecated_name='quota_key_pairs',
help="""
The maximum number of key pairs allowed per user.
Users can create at least one key pair for each project and use the key pair
for multiple instances that belong to that project.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('server_groups',
min=-1,
default=10,
deprecated_group='DEFAULT',
deprecated_name='quota_server_groups',
help="""
The maximum number of server groups per project.
Server groups are used to control the affinity and anti-affinity scheduling
policy for a group of servers or instances. Reducing the quota will not affect
any existing group, but new servers will not be allowed into groups that have
become over quota.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
cfg.IntOpt('server_group_members',
min=-1,
default=10,
deprecated_group='DEFAULT',
deprecated_name='quota_server_group_members',
help="""
The maximum number of servers per server group.
Possible values:
* A positive integer or 0.
* -1 to disable the quota.
"""),
# TODO(stephenfin): This should have a min parameter
cfg.IntOpt('reservation_expire',
default=86400,
deprecated_group='DEFAULT',
help="""
The number of seconds until a reservation expires.
This quota represents the time period for invalidating quota reservations.
"""),
cfg.IntOpt('until_refresh',
min=0,
default=0,
deprecated_group='DEFAULT',
help="""
The count of reservations until usage is refreshed.
This defaults to 0 (off) to avoid additional load but it is useful to turn on
to help keep quota usage up-to-date and reduce the impact of out of sync usage
issues.
"""),
cfg.IntOpt('max_age',
min=0,
default=0,
deprecated_group='DEFAULT',
help="""
The number of seconds between subsequent usage refreshes.
This defaults to 0 (off) to avoid additional load but it is useful to turn on
to help keep quota usage up-to-date and reduce the impact of out of sync usage
issues. Note that quotas are not updated on a periodic task, they will update
on a new reservation if max_age has passed since the last reservation.
"""),
# TODO(pumaranikar): Add a new config to select between the db_driver and
# the no_op driver using stevedore.
cfg.StrOpt('driver',
default='nova.quota.DbQuotaDriver',
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_group='DEFAULT',
deprecated_name='quota_driver',
help="""
The quota enforcer driver.
Provides abstraction for quota checks. Users can configure a specific
driver to use for quota checks.
Possible values:
* nova.quota.DbQuotaDriver (default) or any string representing a fully
  qualified class name.
"""),
cfg.BoolOpt('recheck_quota',
default=True,
help="""
Recheck quota after resource creation to prevent allowing quota to be exceeded.
This defaults to True (recheck quota after resource creation) but can be set to
False to avoid additional load if allowing quota to be exceeded because of
racing requests is considered acceptable. For example, when set to False, if a
user makes highly parallel REST API requests to create servers, it will be
possible for them to create more servers than their allowed quota during the
race. If their quota is 10 servers, they might be able to create 50 during the
burst. After the burst, they will not be able to create any more servers but
they will be able to keep their 50 servers until they delete them.
The initial quota check is done before resources are created, so if multiple
parallel requests arrive at the same time, all could pass the quota check and
create resources, potentially exceeding quota. When recheck_quota is True,
quota will be checked a second time after resources have been created and if
the resource is over quota, it will be deleted and OverQuota will be raised,
usually resulting in a 403 response to the REST API user. This makes it
impossible for a user to exceed their quota with the caveat that it will,
however, be possible for a REST API user to be rejected with a 403 response in
the event of a collision close to reaching their quota limit, even if the user
has enough quota available when they made the request.
"""),
]
def register_opts(conf):
conf.register_group(quota_group)
conf.register_opts(quota_opts, group=quota_group)
def list_opts():
return {quota_group: quota_opts}
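# Hedged usage sketch (names follow oslo.config conventions): once registered,
# the values are read from the [quota] group of nova.conf.
#
#     from oslo_config import cfg
#     CONF = cfg.CONF
#     register_opts(CONF)
#     print(CONF.quota.instances)   # -> 10 unless overridden in nova.conf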
| phenoxim/nova | nova/conf/quota.py | Python | apache-2.0 | 10,497 |
#! /usr/bin/env python3
""" Plots an order-of-accuracy plot using data points given by a command line argument.
Uses linear regression to compute slope of the data.
"""
import sys
import numpy as np
from matplotlib import pyplot as plt
fname = sys.argv[1]
data = np.genfromtxt(fname)
n = data.shape[0]
print("Number of points = " + str(n))
data = np.log10(data)
psigy = data[:,1].sum()
sigx = data[:,0].sum()
sigx2 = (data[:,0]*data[:,0]).sum()
psigxy = (data[:,1]*data[:,0]).sum()
pslope = (n*psigxy-sigx*psigy)/(n*sigx2-sigx**2)
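# The closed-form expression above is the least-squares slope of
# log10(error) vs log10(mesh size): slope = (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2).
# Optional sanity check: numpy's polynomial fit should agree with it.
# slope_check = np.polyfit(data[:, 0], data[:, 1], 1)[0]
# assert abs(slope_check - pslope) < 1e-9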
plt.plot(data[:,0],data[:,1],'o-')
plt.title("Grid-refinement Study: slope = "+str(pslope))
plt.xlabel("Log mesh size")
plt.ylabel("Log error")
plt.show()
| Slaedr/amovemesh | test-cases/curved-mesh-gen-splines/plot_order.py | Python | gpl-3.0 | 694 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("ply")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party"))
from ply.lex import TOKEN
from ..error import Error
# Disable lint check for exceptions deriving from Exception:
# pylint: disable=W0710
class LexError(Error):
"""Class for errors from the lexer."""
def __init__(self, filename, message, lineno):
Error.__init__(self, filename, message, lineno=lineno)
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Lexer(object):
def __init__(self, filename):
self.filename = filename
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
raise LexError(self.filename, msg, token.lineno)
##
## Reserved keywords
##
keywords = (
'HANDLE',
'IMPORT',
'MODULE',
'STRUCT',
'INTERFACE',
'ENUM',
'CONST',
'TRUE',
'FALSE',
'DEFAULT',
)
keyword_map = {}
for keyword in keywords:
keyword_map[keyword.lower()] = keyword
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'NAME',
# Constants
'ORDINAL',
'INT_CONST_DEC', 'INT_CONST_HEX',
'FLOAT_CONST',
'CHAR_CONST',
# String literals
'STRING_LITERAL',
# Operators
'MINUS',
'PLUS',
'AMP',
# Assignment
'EQUALS',
# Request / response
'RESPONSE',
# Delimiters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
'LANGLE', 'RANGLE', # < >
'SEMI', # ;
'COMMA', 'DOT' # , .
)
##
## Regexes for use in tokens
##
# valid C identifiers (K&R2: A.2.3)
identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
# integer constants (K&R2: A.2.5.1)
decimal_constant = '0|([1-9][0-9]*)'
hex_constant = hex_prefix+hex_digits
# Don't allow octal constants (even invalid octal).
octal_constant_disallowed = '0[0-9]+'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
  # For the same reason, decimal_escape allows all digit sequences. We want to
  # parse all correct code, even if it means sometimes accepting incorrect
  # code.
#
simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
decimal_escape = r"""(\d+)"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
escape_sequence = \
r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
bad_char_const = \
r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+ \
bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = \
'(((('+fractional_constant+')'+ \
exponent_part+'?)|([0-9]+'+exponent_part+')))'
# Ordinals
ordinal = r'@[0-9]+'
missing_ordinal_value = r'@'
# Don't allow ordinal values in octal (even invalid octal, like 09) or
# hexadecimal.
octal_or_hex_ordinal_disallowed = r'@((0[0-9]+)|('+hex_prefix+hex_digits+'))'
##
## Rules for the normal state
##
t_ignore = ' \t\r'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
# Operators
t_MINUS = r'-'
t_PLUS = r'\+'
t_AMP = r'&'
# =
t_EQUALS = r'='
# =>
t_RESPONSE = r'=>'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LANGLE = r'<'
t_RANGLE = r'>'
t_COMMA = r','
t_DOT = r'\.'
t_SEMI = r';'
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(octal_constant_disallowed)
def t_OCTAL_CONSTANT_DISALLOWED(self, t):
msg = "Octal values not allowed"
self._error(msg, t)
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(char_const)
def t_CHAR_CONST(self, t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(self, t):
msg = "Unmatched '"
self._error(msg, t)
@TOKEN(bad_char_const)
def t_BAD_CHAR_CONST(self, t):
msg = "Invalid char constant %s" % t.value
self._error(msg, t)
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
# Handle ordinal-related tokens in the right order:
@TOKEN(octal_or_hex_ordinal_disallowed)
def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):
msg = "Octal and hexadecimal ordinal values not allowed"
self._error(msg, t)
@TOKEN(ordinal)
def t_ORDINAL(self, t):
return t
@TOKEN(missing_ordinal_value)
def t_BAD_ORDINAL(self, t):
msg = "Missing ordinal value"
self._error(msg, t)
@TOKEN(identifier)
def t_NAME(self, t):
t.type = self.keyword_map.get(t.value, "NAME")
return t
# Ignore C and C++ style comments
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
msg = "Illegal character %s" % repr(t.value[0])
self._error(msg, t)
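# Hedged usage sketch (the real driver lives elsewhere in the mojom parser):
# ply builds a lexer from this class's token rules via lex.lex(object=...).
#
#     import ply.lex as lex
#     mojom_lexer = lex.lex(object=Lexer("example.mojom"))
#     mojom_lexer.input("module sample; struct Foo { int32 bar@0; };")
#     for tok in iter(mojom_lexer.token, None):
#         print(tok.type, tok.value)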
| boundarydevices/android_external_chromium_org | mojo/public/tools/bindings/pylib/mojom/parse/lexer.py | Python | bsd-3-clause | 6,890 |
"""Query the switch for configured queues on a port."""
# System imports
# Third-party imports
# Local source tree imports
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import Pad, UBInt32
from pyof.v0x04.common.header import Header, Type
from pyof.v0x04.common.port import PortNo
__all__ = ('QueueGetConfigRequest',)
class QueueGetConfigRequest(GenericMessage):
"""Query structure for configured queues on a port."""
#: Openflow :class:`~pyof.v0x04.common.header.Header`.
    header = Header(message_type=Type.OFPT_QUEUE_GET_CONFIG_REQUEST)
#: Port to be queried. Should refer to a valid physical port
#: (i.e. < OFPP_MAX), or OFPP_ANY to request all configured queues.
port = UBInt32(enum_ref=PortNo)
pad = Pad(4)
def __init__(self, xid=None, port=None):
"""Create a QueueGetConfigRequest with the optional parameters below.
Args:
xid (int): xid of OpenFlow header
port (:class:`~.common.port.PortNo`): Target port for the query.
"""
super().__init__(xid)
self.port = port
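# Hedged usage sketch: request the queue configuration of every port and
# serialize the message to wire format (pack() is inherited from
# GenericMessage).
#
#     request = QueueGetConfigRequest(xid=1, port=PortNo.OFPP_ANY)
#     wire_bytes = request.pack()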
| kytos/python-openflow | pyof/v0x04/controller2switch/queue_get_config_request.py | Python | mit | 1,105 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
import errno
import os
import resource
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import time
from importlib import import_module
from itertools import chain
from itertools import count
from itertools import filterfalse
from itertools import zip_longest
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def import_from(mod, obj, default=lambda: None):
"""
Returns the object from module mod if the given module exists and has such
object, else 'default' parameter.
The object is not assigned to the caller's namespace
"""
try:
return getattr(get_module(mod), obj)
except AttributeError:
return default()
def get_module(module, pkg=None, default=lambda: None):
"""
Returns the module if the given module exists, else 'default' parameter.
The module is not assigned to the caller's namespace
"""
try:
return import_module(module, pkg)
except ImportError:
return default()
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# From python3 shutil
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to
# the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def sage_browser(sage_root):
"""
Set up default programs for opening web pages.
    INPUT:
    - ``sage_root`` -- a string; path to the Sage installation root
EXAMPLES::
sage: from sagenb.config import _sage_browser_fb
sage: _sage_browser_fb() # random -- depends on OS, etc.
'sage-open'
NOTE:
Extracted from sage.misc.viewer.default_viewer
"""
if os.uname()[0] == 'Darwin':
return os.path.join(sage_root, 'local', 'bin', 'sage-open')
if os.uname()[0][:6] == 'CYGWIN':
brs = (
os.environ.get('BROWSER', ''),
'/cygdrive/{}/system32/rundll32.exe '
'url.dll,FileProtocolHandler'.format(
os.environ['SYSTEMROOT'].replace(':', '/').replace('\\', '')))
return next(filter(None, brs))
brs = ('xdg-open', os.environ.get('BROWSER', ''), 'firefox',
'google-chrome', 'mozilla', 'mozilla-firefox', 'konqueror')
    # ':' is a shell no-op, used as a last-resort fallback browser command
browser = next(filter(None, map(which, brs)), ':')
return browser
def tmp_filename(name='tmp', ext='', **kwargs):
# TODO: rethink this
# based on sage.misc.temporary_file
handle, tmp = tempfile.mkstemp(prefix=name, suffix=ext, **kwargs)
os.close(handle)
return tmp
def tmp_dir(name='dir', ext='', **kwargs):
# TODO: rethink this
# based on sage.misc.temporary_file
tmp = tempfile.mkdtemp(prefix=name, suffix=ext, **kwargs)
return '{}{}'.format(tmp, os.sep)
def cputime(t=0):
# TODO: Not used
try:
t = float(t)
except TypeError:
t = 0.0
u, s = resource.getrusage(resource.RUSAGE_SELF)[:2]
return u + s - t
def walltime(t=0):
return time.time() - t
def set_restrictive_permissions(filename, allow_execute=False):
x = stat.S_IRWXU
if allow_execute:
x = x | stat.S_IXGRP | stat.S_IXOTH
os.chmod(filename, x)
def set_permissive_permissions(filename):
# TODO: Not used
os.chmod(filename, stat.S_IRWXO | stat.S_IRWXU | stat.S_IRWXG)
def ignore_nonexistent_files(curdir, dirlist):
"""
Returns a list of non-existent files, given a directory and its
contents. The returned list includes broken symbolic links. Use
this, e.g., with :func:`shutil.copytree`, as shown below.
INPUT:
- ``curdir`` - a string; the name of the current directory
- ``dirlist`` - a list of strings; names of ``curdir``'s contents
OUTPUT:
- a list of strings; names of ``curdir``'s non-existent files
EXAMPLES::
sage: import os, shutil
sage: from sagenb.misc.misc import ignore_nonexistent_files
sage: opj = os.path.join; ope = os.path.exists; t = tmp_dir()
sage: s = opj(t, 'src'); t = opj(t, 'trg'); hi = opj(s, 'hi.txt');
sage: os.makedirs(s)
sage: f = open(hi, 'w'); f.write('hi'); f.close()
sage: os.symlink(hi, opj(s, 'good.txt'))
sage: os.symlink(opj(s, 'bad'), opj(s, 'bad.txt'))
sage: slist = sorted(os.listdir(s)); slist
['bad.txt', 'good.txt', 'hi.txt']
sage: map(lambda x: ope(opj(s, x)), slist)
[False, True, True]
sage: map(lambda x: os.path.islink(opj(s, x)), slist)
[True, True, False]
sage: shutil.copytree(s, t)
Traceback (most recent call last):
...
Error: [('.../src/bad.txt',
'.../trg/bad.txt',
"[Errno 2] No such file or directory: '.../src/bad.txt'")]
sage: shutil.rmtree(t); ope(t)
False
sage: shutil.copytree(s, t, ignore = ignore_nonexistent_files)
sage: tlist = sorted(os.listdir(t)); tlist
['good.txt', 'hi.txt']
sage: map(lambda x: ope(opj(t, x)), tlist)
[True, True]
sage: map(lambda x: os.path.islink(opj(t, x)), tlist) # Note!
[False, False]
"""
ignore = []
for x in dirlist:
if not os.path.exists(os.path.join(curdir, x)):
ignore.append(x)
return ignore
def word_wrap(s, ncols=85):
t = []
if ncols == 0:
return s
for x in s.split('\n'):
if len(x) == 0 or x.lstrip()[:5] == 'sage:':
t.append(x)
continue
while len(x) > ncols:
k = ncols
while k > 0 and x[k] != ' ':
k -= 1
if k == 0:
k = ncols
end = '\\'
else:
end = ''
t.append(x[:k] + end)
x = x[k:]
k = 0
while k < len(x) and x[k] == ' ':
k += 1
x = x[k:]
t.append(x)
return '\n'.join(t)
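# e.g. word_wrap('a b c d', ncols=3) returns 'a b\nc d'; a break forced inside
# an unbroken word longer than ncols gets a trailing '\' continuation marker.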
def N_(message):
return message
def nN_(message_singular, message_plural):
return [message_singular, message_plural]
def open_msg(address, port, secure=False, path=""):
"""
Print a message on the screen suggesting that the user open their
web browser to a certain URL.
INPUT:
- ``address`` -- a string; a computer address or name
- ``port`` -- an int; a port number
- ``secure`` -- a bool (default: False); whether to prefix the URL
with 'http' or 'https'
- ``path`` -- a string; the URL's path following the port.
EXAMPLES::
sage: from sagenb.misc.misc import open_msg
sage: print(open_msg('localhost', 8080, True))
┌──────────────────────────────────────────────────┐
│ │
│ Open your web browser to https://localhost:8080 │
│ │
└──────────────────────────────────────────────────┘
sage: print(open_msg('sagemath.org', 8080, False))
┌────────────────────────────────────────────────────┐
│ │
│ Open your web browser to http://sagemath.org:8080 │
│ │
└────────────────────────────────────────────────────┘
sage: print(open_msg('sagemath.org', 90, False))
┌──────────────────────────────────────────────────┐
│ │
│ Open your web browser to http://sagemath.org:90 │
│ │
└──────────────────────────────────────────────────┘
sage: print(open_msg('sagemath.org', 80, False))
┌────────────────────────────────────────────────┐
│ │
│ Open your web browser to http://sagemath.org │
│ │
└────────────────────────────────────────────────┘
"""
port = '' if port == 80 else ':{}'.format(port)
s = "Open your web browser to http%s://%s%s%s" % (
's' if secure else '', address, port, path)
template = '{0}{4:{1}^{3}}{2}'
n = len(s) + 2
return '\n'.join((
template.format('┌', '─', '┐', n, ''),
template.format('│', ' ', '│', n, ''),
template.format('│', ' ', '│', n, s),
template.format('│', ' ', '│', n, ''),
template.format('└', '─', '┘', n, ''),
))
def find_next_available_port(interface, start, max_tries=100, verbose=False):
"""
Find the next available port at a given interface, that is, a port for
which a current connection attempt returns a 'Connection refused'
error message. If no port is found, raise a RuntimeError exception.
INPUT:
- ``interface`` - address to check
- ``start`` - an int; the starting port number for the scan
- ``max_tries`` - an int (default: 100); how many ports to scan
- ``verbose`` - a bool (default: True); whether to print information
about the scan
OUTPUT:
- an int - the port number
EXAMPLES::
sage: from sagenb.misc.misc import find_next_available_port
sage: find_next_available_port(
'127.0.0.1',
9000, verbose=False) # random output -- depends on network
9002
"""
def handler(signum, frame):
raise UserWarning('timed out')
alarm_count = 0
for port in range(start, start + max_tries + 1):
signal.signal(signal.SIGALRM, handler)
signal.alarm(5)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((interface, port))
except socket.error as err:
if err.errno == errno.ECONNREFUSED:
if verbose:
print("Using port = %s" % port)
return port
except UserWarning:
if verbose:
print("Port %s timed out." % port)
print("Trying next port...")
continue
except KeyboardInterrupt:
if verbose:
print("alarm")
alarm_count += 1
if alarm_count >= 10:
break
finally:
signal.signal(signal.SIGALRM, signal.SIG_DFL)
signal.alarm(0)
if verbose:
print("Port %s is already in use." % port)
print("Trying next port...")
raise RuntimeError("no available port.")
def open_page(browser, address, port, secure, path=""):
rsrc = 'https' if secure else 'http'
os.system('%s %s://%s:%s%s 1>&2 > /dev/null &' %
(browser, rsrc, address, port, path))
def system_command(cmd, msg=None):
msg = cmd if msg is None else '\n'.join((msg, cmd))
print(msg)
subprocess.call([cmd], shell=True)
def cached_property(writable=False, invalidate=()):
def invalidate_cache(self):
for attr in invalidate:
delattr(self, attr)
def wrapper(function):
attr_name = '___{}___'.format(function.__name__)
def get_cached_property(self):
try:
output = getattr(self, attr_name)
except AttributeError:
invalidate_cache(self)
output = function(self)
setattr(self, attr_name, output)
return output
def del_cached_property(self):
invalidate_cache(self)
try:
delattr(self, attr_name)
except AttributeError:
pass
if writable:
def set_cached_property(self, value):
invalidate_cache(self)
setattr(self, attr_name, value)
else:
set_cached_property = None
return property(fget=get_cached_property, fset=set_cached_property,
fdel=del_cached_property)
return wrapper
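# Minimal usage sketch (hypothetical class) for cached_property above:
#
#     class Circle(object):
#         def __init__(self, r):
#             self.r = r
#         @cached_property(writable=True)
#         def area(self):
#             return 3.14159 * self.r ** 2
#
# The first access computes and stores ___area___; `del circle.area`
# invalidates the cache, and assignment works because writable=True.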
def id_generator(exclude=None, offset=0):
if exclude:
m = max(exclude)
return chain(sorted(set(range(m)) - set(exclude)),
count(m + 1 + offset))
else:
return count(offset)
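# e.g. id_generator(exclude={0, 2, 3}) yields 1, then 4, 5, 6, ... while
# id_generator(offset=10) simply counts 10, 11, 12, ...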
def make_path_relative(dir):
r"""
Replace an absolute path with a relative path, if possible.
Otherwise, return the given path.
INPUT:
- ``dir`` - a string containing, e.g., a directory name
OUTPUT:
- a string
"""
base, file = os.path.split(dir)
if os.path.exists(file):
return file
return dir
def sort_worksheet_list(v, sort, reverse):
"""
Sort a given list on a given key, in a given order.
INPUT:
- ``sort`` - a string; 'last_edited', 'owner', 'rating', or 'name'
- ``reverse`` - a bool; if True, reverse the order of the sort.
OUTPUT:
- the sorted list
"""
def key_last_edited(a):
return -a.last_edited
if sort == 'last_edited':
v.sort(key=key_last_edited, reverse=reverse)
elif sort in ['name', 'owner']:
v.sort(key=lambda a: (getattr(a, sort).lower(), key_last_edited(a)),
reverse=reverse)
elif sort == 'rating':
v.sort(key=lambda a: (getattr(a, sort), key_last_edited(a)),
reverse=reverse)
else:
raise ValueError('Invalid sort key {!r}'.format(sort))
def set_default(val, default):
return default if val is None else val
def abspath(*paths):
"""
    None is a path with abspath None and no filesystem counterpart
"""
return None if paths == (None,) else os.path.abspath(os.path.join(*paths))
def testpaths(*paths):
"""
    None is a path with abspath None and no filesystem counterpart
"""
return all(map(lambda p: p is None or os.path.exists(p), paths))
def makedirs(*paths):
"""
    None is a path with abspath None and no filesystem counterpart
"""
for path in filterfalse(testpaths, paths):
os.makedirs(path)
def securepath(path):
"""
    None is a path with abspath None and no filesystem counterpart
"""
return None if path is None else path.replace(os.path.sep, '_')
| migeruhito/sagenb | sagewui/util/__init__.py | Python | gpl-3.0 | 17,402 |
# Generated by Django 2.0.3 on 2018-07-19 10:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('order', '0048_auto_20180629_1055'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-pk',), 'permissions': (('manage_orders', 'Manage orders.'),)},
),
]
| UITools/saleor | saleor/order/migrations/0049_auto_20180719_0520.py | Python | bsd-3-clause | 397 |
#!/usr/bin/python
import ConfigParser
import os
from crontab import CronTab
from decimal import *
#This first section interprets the configuration file. It covers how the variables in the first section are configured and converted for use by the tests.
getcontext().prec = 8
#This section is used for the variables
config = ConfigParser.ConfigParser()
config.readfp(open('/etc/curl/curl.conf'))
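# Hypothetical /etc/curl/curl.conf layout, inferred from the config.get()
# calls below (the key names are real, the values are examples only):
#
#     [Section 1]
#     url_number = 2
#     seconds_per_test = 10
#     user_agent_string = "Mozilla/5.0"
#     url1 = http://example.com
#     url2 = http://example.org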
url_number = config.get('Section 1', 'url_number')
url_number = int(url_number)
tests_per_min = int(60/int(config.get('Section 1', 'seconds_per_test')))
time_per_tests = Decimal(60)/ Decimal(tests_per_min * url_number)
user_agent_string = config.get('Section 1', 'user_agent_string')
#wait_time = int(config.get('Section 1','seconds_per_test'))
test_script = open(config.get('Section 2', 'big_curl_script_location'), 'w')
curl_file = config.get('Section 2', 'curl_script_location')
csv_file = config.get('Section 2', 'curl_csv_output')
curl_script_frequency = int(config.get('Section 3', 'curl_script_frequency'))
db_script_frequency = int(config.get('Section 4', 'local_db_update'))
cron_user = str(config.get('Section 3', 'cron_user'))
remote_db_script_frequency = int(config.get('Section 5', 'remote_db_update'))
print time_per_tests
exit
#This is to write the test file
test_script.write('#!/bin/bash\n') # the first line required to call bash
for y in range (tests_per_min,0,-1):
for x in range (url_number,0, -1):
x = str(x)
try:
test_script.write( 'nohup ' +' '+ curl_file +' '+ config.get('Section 1', 'url' + (x)) +' '+ user_agent_string +' '+ csv_file +' &'+ '\n')
except ConfigParser.NoOptionError:
print "missing url" + (x)
break
wait_time2= str(time_per_tests)
test_script.write( 'sleep ' + wait_time2 + '\n')
test_script.close()
os.chmod(config.get('Section 2', 'big_curl_script_location'), 0777)
print test_script
#time to create cron jobs
#running the tests
tab = CronTab(user= cron_user)
cmd_curl = config.get('Section 2', 'big_curl_script_location')
tab.remove_all(comment='CurlScript')
cron_job_curl = tab.new(cmd_curl, comment='CurlScript')
cron_job_curl.minute.every(curl_script_frequency)
tab.write()
#2
tab = CronTab(user= cron_user)
cmd_curl1 = config.get('Section 4', 'local_db_file_copy')
cmd_curl2 = config.get('Section 4', 'local_db_upload')
tab.remove_all(comment='db_update')
cron_job_curl = tab.new("/usr/bin/python " + cmd_curl1 + " && " + cmd_curl2 + " > /tmp/update.log", comment='db_update')
cron_job_curl.minute.every(db_script_frequency)
tab.write()
#3
tab = CronTab(user= cron_user)
cmd_curl = config.get('Section 5', 'remote_upload_file_location')
tab.remove_all(comment='remote_db_update')
cron_job_curl = tab.new("/usr/bin/python " + cmd_curl + " > /tmp/remote.log 2>&1", comment='remote_db_update')
cron_job_curl.minute.every(remote_db_script_frequency)
tab.write()
print tab.render ()
exit
| manhof/test_isp_curl | curltest12.py | Python | mit | 2,918 |
import os
from unittest import TestCase
from cookiejar.client import CookiejarClient
class ClientTests(TestCase):
maxDiff = None
def test_pagination(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = [
{
u'id': 1,
u'name': u'audreyr/pypackage',
u'url': u'https://github.com/audreyr/cookiecutter-pypackage/archive/fe165c5242cc889db0c58476abde905cecf14dfa.zip',
u'version': u'0.0.1',
u'author': u'Audrey Roy',
u'description': u'Cookiecutter template for a Python package.',
u'checksum': "md5$a79cc0ef3897d14eeb3b5be6a37a5ff8",
u'user': u'audreyr',
},
{
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
},
{
u'id': 3,
u'name': u'pydanny/django',
u'url': u'https://github.com/pydanny/cookiecutter-django/archive/172036f8f34b82c29bdc0bb3f31f5b703d0ce8f8.zip',
u'version': u'0.0.1',
u'author': u'Daniel Greenfeld',
u'description': u'A cookiecutter template for creating reusable Django projects quickly.',
u'checksum': "md5$874ce3c00faabde6a11fb3c9d3909649",
u'user': u'pydanny',
}
]
results = client.filter()
res = list(results)
self.assertEqual(len(res), len(expected))
self.assertEqual(res, expected)
def test_get(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = {
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
}
client.fetch()
result = client.get('sloria/flask')
self.assertEqual(result, expected)
self.assertRaises(RuntimeError, client.get, 'unexisting_tmeplate')
| fcurella/cookiejar | tests/test_client.py | Python | mit | 2,942 |
#! /usr/bin/python
# Copyright (c) 2015, Matthew P. Grosvenor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import math
import numpy
import os
if len(sys.argv) < 2:
print "Usage ./process_mem <directory>"
sys.exit(-1)
dirs=sys.argv[1:]
i = 0
ts_start = 0
found_total = 0
dropped_total = 0
ts_prev = 0
lts_total = 0
lts_count = 0
sum_square_error = 0
sum_square = 0
for directory in dirs:
ts_start = 0
found_total = 0
dropped_total = 0
ts_prev = 0
lts_total = 0
lts_count = 0
print >> sys.stderr, "Working on %s" % directory
#for f in os.listdir(directory):
values = []
file_out = open(directory + ".set.processed2", "w" )
for i in range(1,26):
try:
fname = directory + "/" + directory + ".%02d" % i
#print >> sys.stderr, "Opening on %s" % fname
data_file = open(fname)
except:
print >> sys.stderr, "Can't open %s" % fname
continue
print >> sys.stderr, "Working on %s" % fname
for line in data_file:
if dropped_total and (dropped_total % 1000000 == 0):
print "Dropped %ik so far..." % (dropped_total / 1000)
try:
cols = line.split(" ")
#TOTAL 0 1390395616147117616 910
ts = int(cols[2])
lt = int(cols[3])
ty = cols[0]
if ty != "SET":
dropped_total += 1
continue
#if lt < 150000:
values.append(lt)
except:
dropped_total += 1
continue
if i and (i % 1000000 == 0):
print >> sys.stderr, "Processed %ik so far..." % (i / 1000)
#mean_square_error = sum_square_error / found_total
#mean_square = sum_square / found_total
#rmse = math.sqrt(mean_square_error)
#rms = math.sqrt(mean_square)
#avg = numpy.mean(values)
#stdev = numpy.std(values)
#perc99 = numpy.percentile(values, 99)
#print "avg: %10.2f 99th: %10.2f stdev: %10.2f RMSE=%10.2fus RMS=%10.2fus SSE=%i MSE=%i found=%i dropped=%i (%s)" \
# % (avg, perc99, stdev, rmse, rms, sum_square_error, mean_square_error, found_total, dropped_total, file)
#sys.stdout.flush()
if not ts_start:
ts_start = ts
#print ts, ts - ts_start
if (ts - ts_start)/ 1000.0 / 1000.0 / 1000.0 > ts_prev + 0.25:
#print "%f %f" % ( ts_prev, lts_total * 1.0 / lts_count )
file_out.write("%f %f\n" % ( ts_prev, lts_total * 1.0 / lts_count ) )
ts_prev = (ts - ts_start)/ 1000.0 / 1000.0 / 1000.0
lts_total = 0
lts_count = 0
sum_square_error += (lt - 443 ) * (lt- 443)
sum_square += (lt ) * (lt)
found_total += 1
i += 1
lts_count += 1
lts_total += lt
file_out.write("-----------------------------------\n")
count = len(values)
if(count == 0):
continue
stddev = numpy.std(values)
mean = numpy.mean(values)
p0 = numpy.percentile(values,0)
p50 = numpy.percentile(values,50)
p99 = numpy.percentile(values,99)
p999 = numpy.percentile(values,99.9)
p9999 = numpy.percentile(values,99.99)
p100 = numpy.percentile(values,100)
mean_square_error = sum_square_error / found_total
mean_square = sum_square / found_total
rmse = math.sqrt(mean_square_error)
rms = math.sqrt(mean_square)
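    # RMSE here is measured against the hard-coded 443 us reference latency
    # used in the sum_square_error accumulation above; RMS is the plain
    # root-mean-square of the raw latencies.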
file_out.write( "MEMD, %s, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n" % (directory, count, p0, mean, stddev, p50, p99, p999, p9999, p100, rms, rmse) )
print "MEMD, %s, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f" % (directory, count, p0, mean, stddev, p50, p99, p999, p9999, p100, rms, rmse)
| camsas/qjump-nsdi15-plotting | figure1a_5/process_mem.py | Python | bsd-3-clause | 5,511 |
import os.path
import pickle
import gzip as libraryGzip
from OSMPythonTools.cachingStrategy.base import CachingStrategyBase
class Pickle(CachingStrategyBase):
def __init__(self, cacheFile='cache', gzip=True):
self._cacheFile = cacheFile + '.pickle' + ('.gzip' if gzip else '')
self._open = libraryGzip.open if gzip else open
self.close()
def get(self, key):
if self._cache is None:
self.open()
return self._cache[key] if key in self._cache else None
def set(self, key, value):
if self._cache is None:
self.open()
with self._open(self._cacheFile, 'ab') as file:
pickle.dump((key, value), file)
self._cache[key] = value
def open(self):
if os.path.exists(self._cacheFile):
with self._open(self._cacheFile, 'rb') as file:
self._cache = {}
try:
while True:
k, v = pickle.load(file)
self._cache[k] = v
except EOFError:
pass
else:
self._cache = {}
def close(self):
self._cache = None
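# Hedged usage sketch: values survive process restarts via the pickle file.
#
#     cache = Pickle(cacheFile='cache', gzip=True)
#     cache.set('answer', 42)
#     cache.get('answer')  # -> 42, replayed from cache.pickle.gzip next run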
| mocnik-science/osm-python-tools | OSMPythonTools/cachingStrategy/pickle.py | Python | gpl-3.0 | 1,187 |
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2021 Rusty Bird <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
import unittest
import qubes.tests
class TC_00_SelfTest(qubes.tests.QubesTestCase):
@qubes.tests.never_awaited.ignore()
def test_000_ignore_never_awaited(self):
intentionally_never_awaited()
@unittest.expectedFailure
def test_001_raise_never_awaited_by_default(self):
intentionally_never_awaited()
async def intentionally_never_awaited():
pass
| QubesOS/qubes-core-admin | qubes/tests/selftest.py | Python | lgpl-2.1 | 1,154 |
from __future__ import unicode_literals, print_function
import sqlite3, os, random
_select_random = 'select {0} from {1} limit 1 offset abs(random()) % (select count({0}) from {1});'
_select_uncommon = 'select value from uncommons where key=?;'
def generate_name():
conn = sqlite3.connect(os.path.join(os.path.dirname(__file__), 'names.db'))
cursor = conn.cursor()
adj = cursor.execute(_select_random.format('adjective', 'adjectives')).fetchone()[0]
anim = cursor.execute(_select_random.format('animal', 'animals')).fetchone()[0]
rare = cursor.execute(_select_random.format('name', 'rares')).fetchone()[0]
uncommon_anim = cursor.execute(_select_uncommon, [adj]).fetchone()
uncommon_adj = cursor.execute(_select_uncommon, [anim]).fetchone()
conn.close()
r = random.random()
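    # Outcome split: roughly 0.2% rare names, ~30% "<adjective> <uncommon
    # animal>" (when a mapping exists), ~30% "<uncommon adjective> <animal>",
    # and the remainder plain "<adjective> <animal>".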
if r < 0.001 or r >= 0.999:
return rare
elif r < 0.3 and uncommon_anim is not None:
return ' '.join((adj, uncommon_anim[0]))
elif r >= 0.7 and uncommon_adj is not None:
return ' '.join((uncommon_adj[0], anim))
return ' '.join((adj, anim))
if __name__ == '__main__':
print(generate_name())
| rotated8/mgsv_names | mgsv_names.py | Python | unlicense | 1,149 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-12 20:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crowdcop_web', '0014_auto_20160412_1607'),
]
operations = [
migrations.AlterField(
model_name='tip',
name='suspect_eye_color',
field=models.CharField(choices=[('UNKNOWN', 'Unknown'), ('AMBER', 'Amber'), ('BLACK', 'Black'), ('BLUE', 'Blue'), ('BROWN', 'Brown'), ('GRAY', 'Gray'), ('GREEN', 'Green'), ('HAZEL', 'Hazel'), ('OTHER', 'Other')], max_length=20),
),
]
| bocaaust/CrowdCop | CrowdCop_test/crowdcop/crowdcop_web/migrations/0015_auto_20160412_1610.py | Python | apache-2.0 | 653 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# drewtils documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 5 01:45:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage']
autodoc_default_options = {"members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'drewtils'
copyright = '2017-2020, Andrew Johnson'
author = 'Andrew Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'drewtilsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'drewtils.tex', 'drewtils Documentation',
'Andrew Johnson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'drewtils', 'drewtils Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'drewtils', 'drewtils Documentation',
author, 'drewtils', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
}
| drewejohnson/drewtils | docs/conf.py | Python | mit | 5,093 |
"""resultAnalysis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/',include('resultAnalyser.api.urls',namespace='api-analysis')),
url(r'^newapi/',include('result.api.urls',namespace='newapi-analysis')),
url(r'^analysis/',include('resultAnalyser.urls',namespace='analysis')),
url(r'^account/',include('account.urls',namespace='account')),
url(r'^',include('result.urls',namespace='result')),
url(r'^xlsx/',include('xlsx.urls',namespace='xlsx')),
]
if settings.DEBUG is True:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| rpsingh21/resultanalysis | resultAnalysis/resultAnalysis/urls.py | Python | mit | 1,462 |
import os
import json
import datetime
import logging
from unittest import TestCase
from supervisor_remote_logging import SyslogFormatter, DockerJsonFormatter, SyslogDockerJsonFormatter
class SupervisorLoggingDateFormatTestCase(TestCase):
def test_default_date_format(self):
"""
Test default date format.
"""
date = datetime.datetime(2000, 1, 1, 1, 0, 0)
date_format = SyslogFormatter().date_format()
self.assertEqual(date.strftime(date_format), '2000-01-01T01:00:00')
def test_custom_date_format(self):
"""
Test custom date format.
"""
date = datetime.datetime(2000, 1, 1, 1, 0, 0)
os.environ['DATE_FORMAT'] = '%b %d %H:%M:%S'
date_format = SyslogFormatter().date_format()
self.assertEqual(date.strftime(date_format), 'Jan 01 01:00:00')
os.environ['DATE_FORMAT'] = SyslogFormatter.DEFAULT_DATE_FORMAT
class DockerJsonFormatterTestCase(TestCase):
def test_json_format(self):
record = logging.LogRecord(
name='foo',
level=logging.INFO,
pathname=None,
lineno=0,
msg="Test message",
args=(),
exc_info=None,
)
formatted = DockerJsonFormatter().format(record)
deserialized = json.loads(formatted)
self.assertEqual(deserialized['hostname'], DockerJsonFormatter.HOSTNAME)
self.assertEqual(deserialized['log'], record.msg)
        self.assertIsNotNone(deserialized['time'])
class SyslogDockerJsonFormatterTestCase(TestCase):
def test_format(self):
record = logging.LogRecord(
name='foo',
level=logging.INFO,
pathname=None,
lineno=0,
msg="Test message",
args=(),
exc_info=None,
)
message = SyslogDockerJsonFormatter().format(record)
syslog_message, json_message = message.split(']: ')
syslog_parts = message.split(' ')
json_data = json.loads(json_message)
self.assertEqual(syslog_parts[0], json_data['time'])
self.assertEqual(syslog_parts[1], json_data['hostname'])
| newrelic/supervisor-remote-logging | tests/test_formatters.py | Python | bsd-2-clause | 2,184 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateZoneFanCoil
log = logging.getLogger(__name__)
class TestHvactemplateZoneFanCoil(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_hvactemplatezonefancoil(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplateZoneFanCoil()
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# object-list
var_template_thermostat_name = "object-list|Template Thermostat Name"
obj.template_thermostat_name = var_template_thermostat_name
# real
var_supply_air_maximum_flow_rate = 3.3
obj.supply_air_maximum_flow_rate = var_supply_air_maximum_flow_rate
# real
var_zone_heating_sizing_factor = 0.0
obj.zone_heating_sizing_factor = var_zone_heating_sizing_factor
# real
var_zone_cooling_sizing_factor = 0.0
obj.zone_cooling_sizing_factor = var_zone_cooling_sizing_factor
# alpha
var_outdoor_air_method = "Flow/Person"
obj.outdoor_air_method = var_outdoor_air_method
# real
var_outdoor_air_flow_rate_per_person = 7.7
obj.outdoor_air_flow_rate_per_person = var_outdoor_air_flow_rate_per_person
# real
var_outdoor_air_flow_rate_per_zone_floor_area = 8.8
obj.outdoor_air_flow_rate_per_zone_floor_area = var_outdoor_air_flow_rate_per_zone_floor_area
# real
var_outdoor_air_flow_rate_per_zone = 9.9
obj.outdoor_air_flow_rate_per_zone = var_outdoor_air_flow_rate_per_zone
# object-list
var_system_availability_schedule_name = "object-list|System Availability Schedule Name"
obj.system_availability_schedule_name = var_system_availability_schedule_name
# real
var_supply_fan_total_efficiency = 0.50005
obj.supply_fan_total_efficiency = var_supply_fan_total_efficiency
# real
var_supply_fan_delta_pressure = 0.0
obj.supply_fan_delta_pressure = var_supply_fan_delta_pressure
# real
var_supply_fan_motor_efficiency = 0.50005
obj.supply_fan_motor_efficiency = var_supply_fan_motor_efficiency
# real
var_supply_fan_motor_in_air_stream_fraction = 0.5
obj.supply_fan_motor_in_air_stream_fraction = var_supply_fan_motor_in_air_stream_fraction
# alpha
var_cooling_coil_type = "ChilledWater"
obj.cooling_coil_type = var_cooling_coil_type
# object-list
var_cooling_coil_availability_schedule_name = "object-list|Cooling Coil Availability Schedule Name"
obj.cooling_coil_availability_schedule_name = var_cooling_coil_availability_schedule_name
# real
var_cooling_coil_design_setpoint = 17.17
obj.cooling_coil_design_setpoint = var_cooling_coil_design_setpoint
# alpha
var_heating_coil_type = "HotWater"
obj.heating_coil_type = var_heating_coil_type
# object-list
var_heating_coil_availability_schedule_name = "object-list|Heating Coil Availability Schedule Name"
obj.heating_coil_availability_schedule_name = var_heating_coil_availability_schedule_name
# real
var_heating_coil_design_setpoint = 20.2
obj.heating_coil_design_setpoint = var_heating_coil_design_setpoint
# object-list
var_dedicated_outdoor_air_system_name = "object-list|Dedicated Outdoor Air System Name"
obj.dedicated_outdoor_air_system_name = var_dedicated_outdoor_air_system_name
# alpha
var_zone_cooling_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_cooling_design_supply_air_temperature_input_method = var_zone_cooling_design_supply_air_temperature_input_method
# real
var_zone_cooling_design_supply_air_temperature_difference = 23.23
obj.zone_cooling_design_supply_air_temperature_difference = var_zone_cooling_design_supply_air_temperature_difference
# alpha
var_zone_heating_design_supply_air_temperature_input_method = "SupplyAirTemperature"
obj.zone_heating_design_supply_air_temperature_input_method = var_zone_heating_design_supply_air_temperature_input_method
# real
var_zone_heating_design_supply_air_temperature_difference = 25.25
obj.zone_heating_design_supply_air_temperature_difference = var_zone_heating_design_supply_air_temperature_difference
# object-list
var_design_specification_outdoor_air_object_name = "object-list|Design Specification Outdoor Air Object Name"
obj.design_specification_outdoor_air_object_name = var_design_specification_outdoor_air_object_name
# object-list
var_design_specification_zone_air_distribution_object_name = "object-list|Design Specification Zone Air Distribution Object Name"
obj.design_specification_zone_air_distribution_object_name = var_design_specification_zone_air_distribution_object_name
# alpha
var_capacity_control_method = "ConstantFanVariableFlow"
obj.capacity_control_method = var_capacity_control_method
# real
var_low_speed_supply_air_flow_ratio = 0.0001
obj.low_speed_supply_air_flow_ratio = var_low_speed_supply_air_flow_ratio
# real
var_medium_speed_supply_air_flow_ratio = 0.0001
obj.medium_speed_supply_air_flow_ratio = var_medium_speed_supply_air_flow_ratio
# object-list
var_outdoor_air_schedule_name = "object-list|Outdoor Air Schedule Name"
obj.outdoor_air_schedule_name = var_outdoor_air_schedule_name
# alpha
var_baseboard_heating_type = "HotWater"
obj.baseboard_heating_type = var_baseboard_heating_type
# object-list
var_baseboard_heating_availability_schedule_name = "object-list|Baseboard Heating Availability Schedule Name"
obj.baseboard_heating_availability_schedule_name = var_baseboard_heating_availability_schedule_name
# real
var_baseboard_heating_capacity = 34.34
obj.baseboard_heating_capacity = var_baseboard_heating_capacity
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_name, var_zone_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].template_thermostat_name, var_template_thermostat_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_air_maximum_flow_rate, var_supply_air_maximum_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_sizing_factor, var_zone_heating_sizing_factor)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_sizing_factor, var_zone_cooling_sizing_factor)
self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_method, var_outdoor_air_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_person, var_outdoor_air_flow_rate_per_person)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone_floor_area, var_outdoor_air_flow_rate_per_zone_floor_area)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_flow_rate_per_zone, var_outdoor_air_flow_rate_per_zone)
self.assertEqual(idf2.hvactemplatezonefancoils[0].system_availability_schedule_name, var_system_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_total_efficiency, var_supply_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_delta_pressure, var_supply_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_efficiency, var_supply_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].supply_fan_motor_in_air_stream_fraction, var_supply_fan_motor_in_air_stream_fraction)
self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_type, var_cooling_coil_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_availability_schedule_name, var_cooling_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].cooling_coil_design_setpoint, var_cooling_coil_design_setpoint)
self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_type, var_heating_coil_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].heating_coil_availability_schedule_name, var_heating_coil_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].heating_coil_design_setpoint, var_heating_coil_design_setpoint)
self.assertEqual(idf2.hvactemplatezonefancoils[0].dedicated_outdoor_air_system_name, var_dedicated_outdoor_air_system_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_input_method, var_zone_cooling_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_cooling_design_supply_air_temperature_difference, var_zone_cooling_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_input_method, var_zone_heating_design_supply_air_temperature_input_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].zone_heating_design_supply_air_temperature_difference, var_zone_heating_design_supply_air_temperature_difference)
self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_outdoor_air_object_name, var_design_specification_outdoor_air_object_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].design_specification_zone_air_distribution_object_name, var_design_specification_zone_air_distribution_object_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].capacity_control_method, var_capacity_control_method)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].low_speed_supply_air_flow_ratio, var_low_speed_supply_air_flow_ratio)
self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].medium_speed_supply_air_flow_ratio, var_medium_speed_supply_air_flow_ratio)
self.assertEqual(idf2.hvactemplatezonefancoils[0].outdoor_air_schedule_name, var_outdoor_air_schedule_name)
self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_type, var_baseboard_heating_type)
self.assertEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_availability_schedule_name, var_baseboard_heating_availability_schedule_name)
        self.assertAlmostEqual(idf2.hvactemplatezonefancoils[0].baseboard_heating_capacity, var_baseboard_heating_capacity)
| rbuffat/pyidf | tests/test_hvactemplatezonefancoil.py | Python | apache-2.0 | 11,132 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class FontBitstreamSpeedo(Package):
"""X.org bitstream-speedo font."""
homepage = "http://cgit.freedesktop.org/xorg/font/bitstream-speedo"
url = "https://www.x.org/archive/individual/font/font-bitstream-speedo-1.0.2.tar.gz"
version('1.0.2', 'f0a777b351cf5adefefcf4823e0c1c01')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('mkfontscale', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
| krafczyk/spack | var/spack/repos/builtin/packages/font-bitstream-speedo/package.py | Python | lgpl-2.1 | 2,114 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import os
from os.path import join as opj
from twisted.trial import unittest
from twisted.python import util
class CorrectComments(unittest.TestCase):
def testNoSlashSlashComments(self):
urlarg = util.sibpath(__file__, opj(os.pardir, 'protocols', '_c_urlarg.c'))
contents = file(urlarg).read()
self.assertEquals(contents.find('//'), -1)
| jxta/cc | vendor/Twisted-10.0.0/twisted/test/test_extensions.py | Python | apache-2.0 | 455 |
# imports - module imports
from frappe.model.document import Document
from frappe import _
import frappe
# imports - frappe module imports
from frappe.core.doctype.version.version import get_diff
from frappe.chat.doctype.chat_room import chat_room
from frappe.chat.util import (
safe_json_loads,
filter_dict,
dictify
)
session = frappe.session
class ChatProfile(Document):
def before_save(self):
if not self.is_new():
self.get_doc_before_save()
def on_update(self):
if not self.is_new():
b, a = self.get_doc_before_save(), self
diff = dictify(get_diff(a, b))
if diff:
user = session.user
fields = [changed[0] for changed in diff.changed]
if 'status' in fields:
rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])
update = dict(user = user, data = dict(status = self.status))
for room in rooms:
frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)
if 'enable_chat' in fields:
update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))
frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)
def authenticate(user):
if user != session.user:
frappe.throw(_("Sorry, you're not authorized."))
@frappe.whitelist()
def get(user, fields = None):
duser = frappe.get_doc('User', user)
dprof = frappe.get_doc('Chat Profile', user)
    # If you're adding something here, make sure the client receives it.
profile = dict(
# User
name = duser.name,
email = duser.email,
first_name = duser.first_name,
last_name = duser.last_name,
username = duser.username,
avatar = duser.user_image,
bio = duser.bio,
# Chat Profile
status = dprof.status,
chat_background = dprof.chat_background,
message_preview = bool(dprof.message_preview),
notification_tones = bool(dprof.notification_tones),
conversation_tones = bool(dprof.conversation_tones),
enable_chat = bool(dprof.enable_chat)
)
profile = filter_dict(profile, fields)
return dictify(profile)
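# Illustrative sketch (added; not part of the original module): assuming
# filter_dict keeps only the listed keys, a caller could fetch a subset of the
# profile; the user address below is an invented placeholder.
#
#     get('[email protected]', fields=['status', 'enable_chat'])
#     # -> {'status': ..., 'enable_chat': ...}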
@frappe.whitelist()
def create(user, exists_ok = False, fields = None):
authenticate(user)
exists_ok, fields = safe_json_loads(exists_ok, fields)
    result = frappe.db.sql("""
        SELECT *
        FROM `tabChat Profile`
        WHERE user = %s
    """, user)
if result:
if not exists_ok:
frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))
else:
dprof = frappe.new_doc('Chat Profile')
dprof.user = user
dprof.save(ignore_permissions = True)
profile = get(user, fields = fields)
return profile
@frappe.whitelist()
def update(user, data):
authenticate(user)
data = safe_json_loads(data)
dprof = frappe.get_doc('Chat Profile', user)
dprof.update(data)
    dprof.save(ignore_permissions = True)
| StrellaGroup/frappe | frappe/chat/doctype/chat_profile/chat_profile.py | Python | mit | 3,304 |
import re
from collections import namedtuple
__author__ = 'Lucas Kjaero'
# Used in the process_cedict_line function. Do not change. Out here to avoid recompilation each call.
cedict_definition_pattern = re.compile(r"/(.*)/")
cedict_pinyin_pattern = re.compile(r"\[(.*)\] /")
DictionaryEntry = namedtuple("DictionaryEntry", ['traditional', 'simplified', 'pinyin', 'meaning'])
def process_cedict_line(line):
"""Process a line in cedict.
Returns (traditional, simplified, pinyin, meaning)
Throws an AssertionError if a line doesn't contain a definition.
If moving, don't forget to move regular expression patterns too."""
    assert len(line) != 0
    assert line[0] != "#"
split_line = line.split(" ")
traditional_val, simplified_val = split_line[0], split_line[1]
pinyin_val = cedict_pinyin_pattern.findall(line)[0]
meaning_val = cedict_definition_pattern.findall(line)[0]
entry = DictionaryEntry(traditional=traditional_val, simplified=simplified_val, pinyin=pinyin_val,
meaning=meaning_val)
return traditional_val, entry
def read_dict(filename="cedict_ts.u8", line_parser=process_cedict_line):
"""Processes a dictionary file into a dictionary of entries. Assumes one line per entry.
Default definitions come from cedict
Uses the provided line_parser function to parse each individual dictionary.
Ignores any lines that raise an AssertionError.
The format is:
{
word: (number_of_entries, entry_1, entry_2, ...),
...
}"""
working_dictionary = dict()
with open(filename) as chinese_dictionary:
for line in chinese_dictionary:
try:
key, entry = line_parser(line)
try:
old_entry = working_dictionary[key]
num_entries = old_entry[0]
working_dictionary[key] = (num_entries + 1,) + old_entry[1:] + (entry,)
except KeyError:
working_dictionary[key] = (1, entry)
except AssertionError:
pass
return working_dictionary
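# --- Illustrative example (added sketch; not part of the original module) ---
# process_cedict_line is a pure function, so the parsing can be demonstrated
# on a literal CC-CEDICT style line.
if __name__ == '__main__':
    sample_line = u"中國 中国 [Zhong1 guo2] /China/Middle Kingdom/"
    key, entry = process_cedict_line(sample_line)
    # key is the traditional form; the namedtuple carries the rest
    print(key, entry.simplified, entry.pinyin, entry.meaning)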
| lucaskjaero/Chinese-Vocabulary-Finder | Dictionary.py | Python | gpl-3.0 | 2,125 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Spanner operators."""
from typing import TYPE_CHECKING, List, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.spanner import SpannerHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SpannerDeployInstanceOperator(BaseOperator):
"""
Creates a new Cloud Spanner instance, or if an instance with the same instance_id
exists in the specified project, updates the Cloud Spanner instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeployInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:param configuration_name: The name of the Cloud Spanner instance configuration
defining how the instance will be created. Required for
instances that do not yet exist.
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the Cloud Spanner instance in
the Google Cloud Console. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, the name is the same as the instance ID.
:param project_id: Optional, the ID of the project which owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_deploy_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'configuration_name',
'display_name',
'gcp_conn_id',
'impersonation_chain',
)
# [END gcp_spanner_deploy_template_fields]
def __init__(
self,
*,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.configuration_name = configuration_name
self.node_count = node_count
self.display_name = display_name
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
def execute(self, context: 'Context') -> None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
self.log.info("Creating Cloud Spanner instance '%s'", self.instance_id)
func = hook.create_instance
else:
self.log.info("Updating Cloud Spanner instance '%s'", self.instance_id)
func = hook.update_instance
func(
project_id=self.project_id,
instance_id=self.instance_id,
configuration_name=self.configuration_name,
node_count=self.node_count,
display_name=self.display_name,
)
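# Illustrative usage (added sketch; not part of the original module). Every ID
# below is an invented placeholder:
#
#     deploy = SpannerDeployInstanceOperator(
#         task_id='spanner_deploy',
#         project_id='my-project',
#         instance_id='my-instance',
#         configuration_name='projects/my-project/instanceConfigs/regional-us-central1',
#         node_count=1,
#         display_name='my-instance',
#         dag=dag,
#     )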
class SpannerDeleteInstanceOperator(BaseOperator):
"""
Deletes a Cloud Spanner instance. If an instance does not exist,
no action is taken and the operator succeeds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_delete_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END gcp_spanner_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
def execute(self, context: 'Context') -> Optional[bool]:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
return hook.delete_instance(project_id=self.project_id, instance_id=self.instance_id)
else:
self.log.info(
"Instance '%s' does not exist in project '%s'. Aborting delete.",
self.instance_id,
self.project_id,
)
return True
class SpannerQueryDatabaseInstanceOperator(BaseOperator):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerQueryDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param query: The query or list of queries to be executed. Can be a path to a SQL
file.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_query_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'database_id',
'query',
'gcp_conn_id',
'impersonation_chain',
)
template_ext: Sequence[str] = ('.sql',)
template_fields_renderers = {'query': 'sql'}
# [END gcp_spanner_query_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
query: Union[str, List[str]],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.query = query
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
if not self.query:
raise AirflowException("The required parameter 'query' is empty")
def execute(self, context: 'Context'):
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.query, str):
queries = [x.strip() for x in self.query.split(';')]
self.sanitize_queries(queries)
else:
queries = self.query
self.log.info(
"Executing DML query(-ies) on projects/%s/instances/%s/databases/%s",
self.project_id,
self.instance_id,
self.database_id,
)
self.log.info(queries)
hook.execute_dml(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
queries=queries,
)
@staticmethod
def sanitize_queries(queries: List[str]) -> None:
"""
        Drops the trailing empty query from queries (left behind when the SQL string ends with ';').
:param queries: queries
:rtype: None
"""
if queries and queries[-1] == '':
del queries[-1]
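# Illustrative usage (added sketch; not part of the original module). A plain
# string is split on ';' and sanitize_queries drops the trailing empty
# statement, so the semicolon-terminated placeholder below is safe:
#
#     dml = SpannerQueryDatabaseInstanceOperator(
#         task_id='spanner_dml',
#         instance_id='my-instance',
#         database_id='my-database',
#         query="DELETE FROM my_table WHERE TRUE;",
#         dag=dag,
#     )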
class SpannerDeployDatabaseInstanceOperator(BaseOperator):
"""
    Creates a new Cloud Spanner database, or does nothing if the
    database already exists.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeployDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_deploy_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'database_id',
'ddl_statements',
'gcp_conn_id',
'impersonation_chain',
)
template_ext: Sequence[str] = ('.sql',)
template_fields_renderers = {'ddl_statements': 'sql'}
# [END gcp_spanner_database_deploy_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
def execute(self, context: 'Context') -> Optional[bool]:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
):
self.log.info(
"Creating Cloud Spanner database '%s' in project '%s' and instance '%s'",
self.database_id,
self.project_id,
self.instance_id,
)
return hook.create_database(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
)
else:
self.log.info(
"The database '%s' in project '%s' and instance '%s'"
" already exists. Nothing to do. Exiting.",
self.database_id,
self.project_id,
self.instance_id,
)
return True
class SpannerUpdateDatabaseInstanceOperator(BaseOperator):
"""
Updates a Cloud Spanner database with the specified DDL statement.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerUpdateDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param ddl_statements: The string list containing DDL to apply to the database.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param operation_id: (Optional) Unique per database operation id that can
be specified to implement idempotency check.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_update_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'database_id',
'ddl_statements',
'gcp_conn_id',
'impersonation_chain',
)
template_ext: Sequence[str] = ('.sql',)
template_fields_renderers = {'ddl_statements': 'sql'}
# [END gcp_spanner_database_update_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: Optional[str] = None,
operation_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.operation_id = operation_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
if not self.ddl_statements:
raise AirflowException("The required parameter 'ddl_statements' is empty or None")
def execute(self, context: 'Context') -> None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
):
raise AirflowException(
f"The Cloud Spanner database '{self.database_id}' in project '{self.project_id}' "
f"and instance '{self.instance_id}' is missing. "
f"Create the database first before you can update it."
)
else:
return hook.update_database(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
operation_id=self.operation_id,
)
class SpannerDeleteDatabaseInstanceOperator(BaseOperator):
"""
Deletes a Cloud Spanner database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteDatabaseInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:param database_id: Cloud Spanner database ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_delete_template_fields]
template_fields: Sequence[str] = (
'project_id',
'instance_id',
'database_id',
'gcp_conn_id',
'impersonation_chain',
)
# [END gcp_spanner_database_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
def execute(self, context: 'Context') -> bool:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
database = hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
if not database:
self.log.info(
"The Cloud Spanner database was missing: "
"'%s' in project '%s' and instance '%s'. Assuming success.",
self.database_id,
self.project_id,
self.instance_id,
)
return True
else:
return hook.delete_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
| Acehaidrey/incubator-airflow | airflow/providers/google/cloud/operators/spanner.py | Python | apache-2.0 | 23,642 |
# Generated by Django 2.1.5 on 2019-01-29 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0003_auto_20160213_1206'),
]
operations = [
migrations.AlterField(
model_name='track',
name='artists',
field=models.ManyToManyField(to='music.Artist'),
),
]
| Menollo/menosic | menosic/music/migrations/0004_auto_20190129_1048.py | Python | gpl-3.0 | 394 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.8+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False, hide_stderr=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = "camomile"
versionfile_source = "camomile/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
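# --- Illustrative example (added sketch; not part of the original file) ---
# versions_from_expanded_variables is pure, so the tag extraction can be shown
# with a simulated, already-expanded git-archive refnames string.
if __name__ == '__main__':
    sample = {"refnames": " (HEAD, tag: 1.2, master)", "full": "abc123"}
    print(versions_from_expanded_variables(sample, tag_prefix))
    # expected: {'version': '1.2', 'full': 'abc123'}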
| camomile-project/camomile-client-python | camomile/_version.py | Python | mit | 6,914 |
#-*- coding: utf-8 -*-
# django-sample-app - A Django app with setup, unittests, docs and demo
# Copyright (C) 2013, Daniel Rus Morales
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render_to_response
def index(request):
return render_to_response("index.html")
def home(request):
return render_to_response("home.html")
def somewhere(request):
return render_to_response("somewhere.html")
| mwang2015/django-sample-app | sample_app/tests/views.py | Python | gpl-3.0 | 1,021 |
"""
Flask-MethodHack
----------------
Description goes here...
Links
`````
* `documentation <http://packages.python.org/Flask-MethodHack>`_
* `development version
<http://github.com/USERNAME/REPOSITORY/zipball/master#egg=Flask-MethodHack-dev>`_
"""
from setuptools import setup
setup(
name='Flask-MethodHack',
version='0.1',
url='<enter URL here>',
license='BSD',
author='kijun',
author_email='[email protected]',
description='<enter short description here>',
long_description=__doc__,
packages=['flaskext'],
namespace_packages=['flaskext'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| kijun/flask-methodhack | setup.py | Python | bsd-3-clause | 1,109 |
prob_start = {
'P': -3.14e+100,
'B': -3.14e+100,
'M': -3.14e+100,
'S': 0.0,
'X': -3.14e+100,
'L': -3.14e+100,
'F': -3.14e+100,
'W': -3.14e+100,
'D': -3.14e+100,
'G': -3.14e+100,
'K': -3.14e+100,
'I': -3.14e+100,
'A': -3.14e+100,
'Z': -3.14e+100,
'J': -3.14e+100,
'C': -3.14e+100,
}
| yuanlisky/linlp | linlp/algorithm/viterbiMat/prob_start_organization.py | Python | apache-2.0 | 282 |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
#
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, Frank Scholz <[email protected]>
# Copyright 2007-2008 Brisa Team <[email protected]>
""" Contains the MSearch class which can search for devices.
"""
from brisa.core import log
from brisa.core.network import parse_http_response
from brisa.core.network_senders import UDPTransport
from brisa.core.network_listeners import UDPListener
from brisa.utils.looping_call import LoopingCall
from brisa.upnp.upnp_defaults import UPnPDefaults
DEFAULT_SEARCH_TIME = UPnPDefaults.MSEARCH_DEFAULT_SEARCH_TIME
DEFAULT_SEARCH_TYPE = UPnPDefaults.MSEARCH_DEFAULT_SEARCH_TYPE
#DEFAULT_SEARCH_TYPE = "upnp:rootdevice"
class MSearch(object):
""" Represents a MSearch. Contains some control functions for starting and
stopping the search. While running, search will be repeated in regular
intervals specified at construction or passed to the start() method.
"""
msg_already_started = 'tried to start() MSearch when already started'
msg_already_stopped = 'tried to stop() MSearch when already stopped'
def __init__(self, ssdp, start=True, interval=DEFAULT_SEARCH_TIME,
ssdp_addr='239.255.255.250', ssdp_port=1900):
""" Constructor for the MSearch class.
@param ssdp: ssdp server instance that will receive new device events
and subscriptions
@param start: if True starts the search when constructed
        @param interval: interval between searches
@param ssdp_addr: ssdp address for listening (UDP)
@param ssdp_port: ssdp port for listening (UDP)
@type ssdp: SSDPServer
@type start: boolean
@type interval: float
@type ssdp_addr: string
        @type ssdp_port: integer
"""
self.ssdp = ssdp
self.ssdp_addr = ssdp_addr
self.ssdp_port = ssdp_port
self.search_type = DEFAULT_SEARCH_TYPE
self.udp_transport = UDPTransport()
# self.listen_udp = UDPListener(ssdp_addr, ssdp_port,
self.listen_udp = UDPListener(ssdp_addr, 2149, # WMP is not picked up if 1900 is used for source
data_callback=self._datagram_received,
shared_socket=self.udp_transport.socket)
self.loopcall = LoopingCall(self.double_discover)
if start:
self.start(interval)
def is_running(self):
""" Returns True if the search is running (it's being repeated in the
interval given).
@rtype: boolean
"""
return self.loopcall.is_running()
def start(self, interval=DEFAULT_SEARCH_TIME,
search_type=DEFAULT_SEARCH_TYPE):
""" Starts the search.
        @param interval: interval between searches. Default is 600.0 seconds
@param search_type: type of the search, default is "ssdp:all"
@type interval: float
@type search_type: string
"""
# interval = 30.0
if not self.is_running():
self.search_type = search_type
self.listen_udp.start()
# print ">>>>>>>>> interval: " + str(interval)
self.loopcall.start(interval, now=True)
log.debug('MSearch started')
else:
log.warning(self.msg_already_started)
def stop(self):
""" Stops the search.
"""
if self.is_running():
log.debug('MSearch stopped')
self.listen_udp.stop()
self.loopcall.stop()
else:
log.warning(self.msg_already_stopped)
def destroy(self):
""" Destroys and quits MSearch.
"""
if self.is_running():
self.stop()
self.listen_udp.destroy()
self.loopcall.destroy()
self._cleanup()
def double_discover(self, search_type=DEFAULT_SEARCH_TYPE):
""" Sends a MSearch imediatelly. Each call to this method will yield a
MSearch message, that is, it won't repeat automatically.
"""
# print "<<<<<<<<< start double discover >>>>>>>>>"
self.discover(search_type)
self.discover(search_type)
# print "<<<<<<<<< end double discover >>>>>>>>>"
def discover(self, type="ssdp:all"):
# def discover(self, type="upnp:rootdevice"):
""" Mounts and sends the discover message (MSearch).
@param type: search type
@type type: string
"""
# type = "urn:schemas-upnp-org:device:MediaServer:1"
type = "upnp:rootdevice"
# req = ['M-SEARCH * HTTP/1.1',
# 'HOST: %s:%d' % (self.ssdp_addr, self.ssdp_port),
# 'MAN: "ssdp:discover"',
# 'MX: 5',
# 'ST: ' + type, '', '']
# req = '\r\n'.join(req)
req = ['M-SEARCH * HTTP/1.1',
'HOST:%s:%d' % (self.ssdp_addr, self.ssdp_port),
'MAN:"ssdp:discover"',
# 'Host:%s:%d' % (self.ssdp_addr, self.ssdp_port),
# 'Man:"ssdp:discover"',
'MX:5',
'ST:' + type, '', '', '']
req = '\r\n'.join(req)
self.udp_transport.send_data(req, self.ssdp_addr, self.ssdp_port)
def _datagram_received(self, data, (host, port)):
""" Callback for the UDPListener when messages arrive.
@param data: raw data received
@param host: host where data came from
@param port: port where data came from
@type data: string
@type host: string
@type port: integer
"""
# print "datagram_received start"
cmd, headers = parse_http_response(data)
if cmd[0] == 'HTTP/1.1' and cmd[1] == '200':
if self.ssdp != None:
if not self.ssdp.is_known_device(headers['usn']):
log.debug('Received MSearch answer %s,%s from %s:%s',
headers['usn'], headers['st'], host, port)
# print "_datagram_received _register"
# print "_datagram_received headers: " + str(headers)
self.ssdp._register(headers['usn'],
headers['st'],
headers['location'],
headers['server'],
headers['cache-control'])
# print " datagram_received end"
def _cleanup(self):
""" Clean up references.
"""
self.ssdp = None
self.listen_udp = None
self.loopcall = None
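# Illustrative usage (added sketch; not part of the original module). Since
# _datagram_received guards against ssdp being None, a bare search without an
# SSDPServer could look like:
#
#     searcher = MSearch(ssdp=None, start=False)
#     searcher.start(interval=30.0, search_type="ssdp:all")
#     # ... let the main loop run for a while, then:
#     searcher.stop()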
| henkelis/sonospy | sonospy/brisa/upnp/control_point/msearch.py | Python | gpl-3.0 | 6,719 |
"""Storage for pytest objects during test runs
The objects in the module will change during the course of a test run,
so they have been stashed into the 'store' namespace
Usage:
# as pytest.store
import pytest
pytest.store.config, pytest.store.pluginmanager, pytest.store.session
# imported directly (store is pytest.store)
from fixtures.pytest_store import store
store.config, store.pluginmanager, store.session
The availability of these objects varies during a test run, but
all should be available in the collection and testing phases of a test run.
"""
import fauxfactory
import os
import sys
import pytest # NOQA: import to trigger initial pluginmanager
from _pytest.terminal import TerminalReporter
from cached_property import cached_property
from py.io import TerminalWriter
from cfme.utils import diaper
class FlexibleTerminalReporter(TerminalReporter):
"""A TerminalReporter stand-in that pretends to work even without a py.test config."""
def __init__(self, config=None, file=None):
if config:
# If we have a config, nothing more needs to be done
return TerminalReporter.__init__(self, config, file)
# Without a config, pretend to be a TerminalReporter
        # hook-related functions (logreport, collection, etc) will be outright broken,
# but the line writers should still be usable
if file is None:
file = sys.stdout
self._tw = self.writer = TerminalWriter(file)
self.hasmarkup = self._tw.hasmarkup
self.reportchars = ''
self.currentfspath = None
class Store(object):
"""pytest object store
If a property isn't available for any reason (including being accessed outside of a pytest run),
it will be None.
"""
@property
def current_appliance(self):
        # lazy import due to loops and loops and loops
from cfme.utils import appliance
        # TODO: conceive a better way to detect/log import-time misuse
# assert self.config is not None, 'current appliance not in scope'
return appliance.current_appliance
def __init__(self):
#: The py.test config instance, None if not in py.test
self.config = None
#: The current py.test session, None if not in a py.test session
self.session = None
#: Parallelizer role, None if not running a parallelized session
self.parallelizer_role = None
# Stash of the "real" terminal reporter once we get it,
# so we don't have to keep going through pluginmanager
self._terminalreporter = None
#: hack variable until we get a more sustainable solution
self.ssh_clients_to_close = []
self.uncollection_stats = {}
@property
def has_config(self):
return self.config is not None
def _maybe_get_plugin(self, name):
""" returns the plugin if the pluginmanager is availiable and the plugin exists"""
return self.pluginmanager and self.pluginmanager.getplugin(name)
@property
def in_pytest_session(self):
return self.session is not None
@property
def fixturemanager(self):
# "publicize" the fixturemanager
return self.session and self.session._fixturemanager
@property
def capturemanager(self):
return self._maybe_get_plugin('capturemanager')
@property
def pluginmanager(self):
# Expose this directly on the store for convenience in getting/setting plugins
return self.config and self.config.pluginmanager
@property
def terminalreporter(self):
if self._terminalreporter is not None:
return self._terminalreporter
reporter = self._maybe_get_plugin('terminalreporter')
if reporter and isinstance(reporter, TerminalReporter):
self._terminalreporter = reporter
return reporter
return FlexibleTerminalReporter(self.config)
@property
def terminaldistreporter(self):
return self._maybe_get_plugin('terminaldistreporter')
@property
def parallel_session(self):
return self._maybe_get_plugin('parallel_session')
@property
def slave_manager(self):
return self._maybe_get_plugin('slave_manager')
@property
def slaveid(self):
return getattr(self.slave_manager, 'slaveid', None)
@cached_property
def my_ip_address(self):
try:
# Check the environment first
return os.environ['CFME_MY_IP_ADDRESS']
except KeyError:
# Fall back to having an appliance tell us what it thinks our IP
# address is
return self.current_appliance.ssh_client.client_address()
def write_line(self, line, **kwargs):
return write_line(line, **kwargs)
store = Store()
def pytest_namespace():
# Expose the pytest store as pytest.store
return {'store': store}
def pytest_plugin_registered(manager):
# config will be set at the second call to this hook
if store.config is None:
store.config = manager.getplugin('pytestconfig')
def pytest_sessionstart(session):
store.session = session
def write_line(line, **kwargs):
"""A write-line helper that should *always* write a line to the terminal
    It knows all of py.test's dirty tricks, including ones that we made, and works around them.
Args:
**kwargs: Normal kwargs for pytest line formatting, stripped from slave messages
"""
if store.slave_manager:
# We're a pytest slave! Write out the vnc info through the slave manager
store.slave_manager.message(line, **kwargs)
else:
        # If py.test is suppressing stdout/err, turn that off for a moment
with diaper:
store.capturemanager.suspendcapture()
# terminal reporter knows whether or not to write a newline based on currentfspath
# so stash it, then use rewrite to blow away the line that printed the current
# test name, then clear currentfspath so the test name is reprinted with the
# write_ensure_prefix call. shenanigans!
cfp = store.terminalreporter.currentfspath
# carriage return, write spaces for the whole line, carriage return, write the new line
store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line,
**kwargs)
store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8)
store.terminalreporter.write_ensure_prefix(cfp)
# resume capturing
with diaper:
store.capturemanager.resumecapture()
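# Illustrative usage (added sketch; not part of the original module). The
# kwargs are the usual py.test markup flags:
#
#     from fixtures.pytest_store import store
#     store.write_line('provisioning appliance...', cyan=True)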
| akarol/cfme_tests | fixtures/pytest_store.py | Python | gpl-2.0 | 6,628 |
# The Hazard Library
# Copyright (C) 2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from openquake.hazardlib.gsim.utils import (
mblg_to_mw_johnston_96, mblg_to_mw_atkinson_boore_87, clip_mean
)
from openquake.hazardlib.imt import PGA, SA
class MblgToMwTestCase(unittest.TestCase):
def test_mblg_to_mw_johnston_96(self):
mblg = 5
mw = mblg_to_mw_johnston_96(mblg)
self.assertAlmostEqual(mw, 4.6725)
def test_mblg_to_mw_atkinson_boore_87(self):
mblg = 5
mw = mblg_to_mw_atkinson_boore_87(mblg)
self.assertAlmostEqual(mw, 4.5050)
class ClipMeanTestCase(unittest.TestCase):
def test_clip_mean(self):
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = PGA()
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.405, 0.405], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.1, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.099], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.6, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.2], clipped_mean
)
mean = numpy.array([0.1, 0.2, 0.6, 1.2])
imt = SA(period=0.01, damping=5.)
clipped_mean = clip_mean(imt, mean)
numpy.testing.assert_allclose(
[0.1, 0.2, 0.6, 1.2], clipped_mean
)
| mmpagani/oq-hazardlib | openquake/hazardlib/tests/gsim/utils_test.py | Python | agpl-3.0 | 2,240 |
"""Implements scanners and plugins to find hypervisors in memory."""
from itertools import groupby
import struct
from rekall import plugin
from rekall import obj
from rekall import utils
from rekall import scan
from rekall import session as session_module
from rekall.plugins.addrspaces import amd64
from rekall.plugins.addrspaces import intel
from rekall.plugins.overlays import basic
KNOWN_REVISION_IDS = {
# Nested hypervisors
# VMware Workstation 10.X
0x01: "VMWARE_NESTED",
# KVM
0x11e57ed0: "KVM_NESTED",
# XEN
0xda0400: "XEN_NESTED",
# Intel VT-x microarchitectures.
0x0d: "PENRYN",
0x0e: "NEHALEM",
0x0f: "WESTMERE",
0x10: "SANDYBRIDGE",
0x12: "HASWELL",
}
# TODO: Find more abort codes.
KNOWN_ABORT_INDICATOR_CODES = {
'\x00\x00\x00\x00': "NO ABORT",
'\x05\x00\x00\x00': "MACHINE CHECK DURING VM EXIT",
'\x0d\x00\x00\x00': "TXT SHUTDOWN",
}
vmcs_overlay = {
'NEHALEM_VMCS' : [None, {
'IS_NESTED': lambda x: False,
}],
'SANDYBRIDGE_VMCS' : [None, {
'IS_NESTED': lambda x: False,
}],
'HASWELL_VMCS' : [None, {
'IS_NESTED': lambda x: False,
}],
'WESTMERE_VMCS' : [None, {
'IS_NESTED': lambda x: False,
}],
'PENRYN_VMCS' : [None, {
'IS_NESTED': lambda x: False,
}],
'VMWARE_NESTED_VMCS' : [None, {
'IS_NESTED': lambda x: True,
}],
'KVM_NESTED_VMCS' : [None, {
'IS_NESTED': lambda x: True,
}],
'XEN_NESTED_VMCS' : [None, {
'IS_NESTED': lambda x: True,
}],
}
class Error(Exception):
"""Base exception."""
class UnrelatedVmcsError(Error):
"""The provided VMCS is unrelated to the VM."""
class IncompatibleASError(Error):
"""An attempt was done at comparing VMCS from different address spaces."""
class InvalidVM(Error):
"""The provided VM is invalid."""
class VMCSProfile(basic.ProfileLP64):
"""Profile to parse hypervisor control structures.
We use the basic profile for 64 bit Linux systems to get the expected width
for each data type.
"""
@classmethod
def Initialize(cls, profile):
super(VMCSProfile, cls).Initialize(profile)
profile.add_overlay(vmcs_overlay)
class VMCSCheck(scan.ScannerCheck):
def check(self, buffer_as, offset):
# CHECK 1: Verify that the VMX-Abort indicator has a known value.
#
# The VMX-Abort indicator field is always at offset 4 in the VMCS
# and is a 32-bit field.
# This field should be 0 unless the memory image was taken while a
# VMX-abort occurred, which is fairly unlikely. Also, if a VMX-abort
# occurs, only a set of values are supposed to be set.
if buffer_as.read(offset+4, 4) not in KNOWN_ABORT_INDICATOR_CODES:
return False
# Obtain the Revision ID
(revision_id,) = struct.unpack_from("<I", buffer_as.read(offset, 4))
revision_id = revision_id & 0x7FFFFFFF
# Obtain a VMCS object based on the revision_id
platform = KNOWN_REVISION_IDS.get(revision_id)
if platform is None:
return False
try:
vmcs_obj = self.profile.Object("%s_VMCS" % platform,
offset=offset,
vm=buffer_as)
except (AttributeError, TypeError):
return False
# CHECK 2: Verify that the VMCS has the VMX flag enabled.
if not vmcs_obj.HOST_CR4 & 0x2000:
return False
# CHECK 3: Verify that VMCS_LINK_POINTER is
# 0xFFFFFFFFFFFFFFFF.
if vmcs_obj.VMCS_LINK_PTR_FULL != 0xFFFFFFFFFFFFFFFF:
return False
return True
class VMCSScanner(scan.BaseScanner):
"""Scans the memory attempting to find VMCS structures.
    Uses the techniques discussed in "Hypervisor Memory Forensics"
(http://s3.eurecom.fr/docs/raid13_graziano.pdf) with slight changes
to identify VT-x hypervisors.
"""
overlap = 0
checks = [["VMCSCheck", {}]]
def __init__(self, **kwargs):
super(VMCSScanner, self).__init__(**kwargs)
self.profile = self.session.LoadProfile("VMCS")
def scan(self, offset=0, end=None, **_):
"""Returns instances of VMCS objects found."""
for offset in super(VMCSScanner, self).scan(offset=offset, end=end):
(revision_id,) = struct.unpack("<I",
self.address_space.read(offset, 4))
revision_id = revision_id & 0x7FFFFFFF
vmcs_obj = self.profile.Object(
"%s_VMCS" % KNOWN_REVISION_IDS.get(revision_id),
offset=offset, vm=self.address_space)
yield vmcs_obj
def skip(self, buffer_as, offset):
return 0x1000
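# Illustrative use of the scanner (a sketch: assumes an initialized Rekall
# session `session` with a physical address space loaded; the image itself
# is hypothetical):
#
#   scanner = VMCSScanner(address_space=session.physical_address_space,
#                         session=session, profile=obj.NoneObject)
#   for vmcs in scanner.scan(end=session.physical_address_space.end()):
#       print "%#x %s" % (vmcs.obj_offset, vmcs.obj_name)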
class VirtualMachine(object):
"""Represents a virtual machine.
A virtual machine is made of VMCS. In Intel processors, each CPU that runs
a VM will have its own VMCS.
"""
def __init__(self, host_rip=None, ept=None, parent=None, name=None,
session=None):
self.ept = long(ept)
self.host_rip = long(host_rip)
self.parent = parent
self.name = name
self.base_session = session
self.vmcss = set()
# Dictionary where the key is a VMCS object and the value
# represents whether the VMCS is valid, or not.
self.vmcs_validation = dict()
self.virtual_machines = set()
@utils.safe_property
def is_valid(self):
"""A VM is valid if at least one of its VMCS is valid."""
        return any(self.vmcs_validation.get(vmcs, False) for vmcs in self.vmcss)
@utils.safe_property
def is_nested(self):
"""A VM is nested if it has a parent or all its VMCS are nested."""
        return self.parent is not None
@utils.safe_property
def hostname(self):
try:
session = self.GetSession()
return session.plugins.hostname().get_hostname()
except AttributeError:
return obj.NoneObject()
except InvalidVM:
return obj.NoneObject("**INVALID VM**")
@utils.safe_property
def num_cores(self):
"""The number of virtual cores of this VM."""
valid_vmcss = filter(self.is_valid_vmcs, self.vmcss)
# Count only unique VPIDs if the hypervisor uses them.
uniq_vpids = set([v.VPID for v in valid_vmcss])
if len(uniq_vpids) != 1:
return len(uniq_vpids)
else:
return len(valid_vmcss)
@utils.safe_property
def host_arch(self):
"""The architecture of the host that started this VM."""
all_host_as = set([self.get_vmcs_host_as_type(v) for v in self.vmcss
if self.is_valid_vmcs(v)])
if len(all_host_as) == 1:
return all_host_as.pop()
return "???"
@utils.safe_property
def guest_arch(self):
"""The architecture of the guest OS of the VM."""
all_guest_as = set([self.get_vmcs_guest_as_type(v) for v in self.vmcss
if self.is_valid_vmcs(v)])
if len(all_guest_as) == 1:
return all_guest_as.pop()
return "???"
@utils.safe_property
def ept_list(self):
"""The list of EPT values needed to instantiate VM guest physical AS.
This is used in conjunction with the VTxPagedMemory AS.
"""
if isinstance(self.parent, VirtualMachine):
ept_list = self.parent.ept_list
ept_list.extend([self.ept])
else:
ept_list = [self.ept]
return ept_list
@utils.safe_property
def physical_address_space(self):
"""The physical address space of this VM's guest."""
if self.is_nested:
base_as = self.parent.physical_address_space
else:
base_as = self.base_session.physical_address_space
return amd64.VTxPagedMemory(
session=self.base_session, ept=self.ept_list, base=base_as)
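    # Example of the EPT chaining above (values assumed for clarity): a guest
    # nested one level deep ends up with ept_list == [host_ept, own_ept],
    # which VTxPagedMemory uses to translate the innermost guest physical
    # addresses down through each hypervisor layer to the base address space.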
@classmethod
def get_vmcs_guest_as_type(cls, vmcs):
"""Returns the address space type of the guest of a VMCS.
One of I386, I386+PAE, AMD64 or None.
"""
if not vmcs.GUEST_CR4 & (1 << 5): # PAE bit
# No PAE
return "I386"
elif not vmcs.ENTRY_CONTROLS & (1 << 9): # long mode bit
# PAE and no long mode = 32bit PAE
return "I386+PAE"
elif vmcs.ENTRY_CONTROLS & (1 << 9): # long mode bit
# Long mode AND PAE = IA-32e
return "AMD64"
else:
# We don't have an address space for other paging modes
return None
@classmethod
def get_vmcs_host_as_type(cls, vmcs):
"""Returns the address space type of the host of a VMCS.
One of I386, I386+PAE, AMD64 or None.
"""
if not vmcs.HOST_CR4 & (1 << 5): # PAE bit
# No PAE
return "I386"
elif not vmcs.EXIT_CONTROLS & (1 << 9): # long mode bit
# PAE and no long mode = 32bit PAE
return "I386+PAE"
elif vmcs.EXIT_CONTROLS & (1 << 9): # long mode bit
# Long mode AND PAE = IA-32e
return "AMD64"
else:
# We don't have an address space for other paging modes
return None
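    # Bit references for the two classifiers above, per the Intel SDM (the
    # sample values are assumptions for illustration): CR4 bit 5 (0x20) is
    # CR4.PAE, and bit 9 (0x200) of ENTRY_CONTROLS / EXIT_CONTROLS is the
    # "IA-32e mode guest" / "host address-space size" control. For example,
    # CR4 = 0x2020 with controls = 0x200 classifies as AMD64, the same CR4
    # with bit 9 clear as I386+PAE, and CR4 with bit 5 clear as I386.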
@classmethod
def get_vmcs_host_address_space(cls, vmcs, base_as=None):
"""Returns the address_space of the host of the VMCS."""
return cls.get_vmcs_address_space(vmcs, host=True, base_as=base_as)
@classmethod
def get_vmcs_guest_address_space(cls, vmcs, base_as=None):
"""Returns the address_space of the guest of the VMCS."""
return cls.get_vmcs_address_space(vmcs, host=False, base_as=base_as)
@classmethod
def get_vmcs_address_space(cls, vmcs, host=True, base_as=None):
"""Returns the address_space of the host or guest process of a VMCS."""
address_space = None
base_as = base_as or vmcs.obj_vm
if host:
cr4 = vmcs.HOST_CR4
cr3 = vmcs.HOST_CR3
controls = vmcs.EXIT_CONTROLS
else:
cr4 = vmcs.GUEST_CR4
cr3 = vmcs.GUEST_CR3
controls = vmcs.ENTRY_CONTROLS
if not cr4 & (1 << 5): # PAE bit
# No PAE
address_space = intel.IA32PagedMemory(dtb=cr3, base=base_as)
elif not controls & (1 << 9): # long mode bit
# PAE and no long mode = 32bit PAE
address_space = intel.IA32PagedMemoryPae(dtb=cr3, base=base_as)
elif controls & (1 << 9): # long mode bit
# Long mode AND PAE = IA-32e
address_space = amd64.AMD64PagedMemory(dtb=cr3, base=base_as)
return address_space
def add_vmcs(self, vmcs, validate=True):
"""Add a VMCS to this virtual machine.
Raises:
UnrelatedVmcsError if the VMCS doesn't match the VM's HOST_RIP or EPT.
"""
        if self.host_rip is None:
            self.host_rip = long(vmcs.HOST_RIP)
        if self.ept is None:
            self.ept = long(vmcs.m("EPT_POINTER_FULL"))
if self.host_rip != vmcs.HOST_RIP:
raise UnrelatedVmcsError("VMCS HOST_RIP differ from the VM's")
if vmcs.m("EPT_POINTER_FULL") != self.ept:
raise UnrelatedVmcsError("VMCS EPT differs from the VM's")
if validate:
self.validate_vmcs(vmcs)
self.vmcss.add(vmcs)
def set_parent(self, parent):
"""Sets the parent of this VM and resets the validation cache."""
if self.parent != parent:
self.parent = parent
self.vmcs_validation.clear()
def unset_parent(self):
self.set_parent(None)
def validate_vmcs(self, vmcs):
"""Validates a VMCS and returns if it's valid in this VM's context.
A VMCS is valid if the page where it's mapped is found in the HOST_CR3
that it points to. The result of this validation is cached. Use
the _reset_validation_state method if you need to invalidate cache
entries.
        A VMCS object will only validate properly if it's defined in the
        context of the physical address space of the VM's parent.
"""
if vmcs in self.vmcs_validation:
return self.vmcs_validation.get(vmcs)
validated = False
# EPTP bits 11:6 are reserved and must be set to 0
# and the page_walk_length, bits 5:3, must be 3 (4 - 1)
#
# Ref: Intel(r) 64 and IA-32 Architectures Software Developer's Manual -
# System Programming Guide Volume 3B, 21-20, 21.6.11
page_walk_length = (vmcs.EPT_POINTER_FULL & 0b111000) >> 3
if (vmcs.EPT_POINTER_FULL & 0b111111000000 or
page_walk_length != 3):
self.vmcs_validation[vmcs] = validated
return validated
# If we are dealing with L1 VMCS, the address space to validate
# is the same as the VMCS.
try:
validation_as = self.get_vmcs_host_address_space(vmcs)
except TypeError:
return False
for run in validation_as.get_mappings():
if self.base_session:
self.base_session.report_progress(
"Validating VMCS %08X @ %08X" % (
vmcs.obj_offset, run.start))
if (vmcs.obj_offset >= run.file_offset and
vmcs.obj_offset < run.file_offset + run.length):
validated = True
break
self.vmcs_validation[vmcs] = validated
return validated
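    # Worked example of the EPTP sanity check above (the EPTP value is
    # assumed): EPT_POINTER_FULL = 0x1df30101e has bits 5:3 == 0b011 (a
    # page-walk length of 4, encoded as 3) and reserved bits 11:6 == 0, so it
    # passes; any other walk length or a non-zero reserved bit marks the VMCS
    # invalid without touching the address space.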
def is_valid_vmcs(self, vmcs):
"""Returns whether the vmcs is valid or None if it wasn't validated.
Doesn't force validation.
"""
return self.vmcs_validation.get(vmcs)
def GetSession(self):
"""Returns a session valid for this VM."""
if not self.is_valid:
raise InvalidVM()
session_override = {
"ept": self.ept_list,
"profile": None,
"session_name": u"VM %s" % u','.join(
[u'0x%X' % s for s in self.ept_list]),
}
return self.base_session.clone(**session_override)
def RunPlugin(self, plugin_name, *args, **kwargs):
"""Runs a plugin in the context of this virtual machine."""
vm_sess = self.GetSession()
return vm_sess.RunPlugin(plugin_name, *args, **kwargs)
def add_nested_vms(self, vm_list, validate_all=True):
"""Tries to add the list of VMs as nested VMs of this one.
To validate nested VMs, we need to see if its identifying VMCS are
mapped in our physical AS and then try to validate them via HOST_CR3
in our context.
"""
_ = validate_all # TODO: Not currently implemented.
if not vm_list:
return
# If a VM is running under us, its VMCS has to be mapped in our
# physical address space.
phys_as = self.physical_address_space
for run in phys_as.get_mappings():
for vm in vm_list:
if self.base_session:
self.base_session.report_progress(
u"Validating VM(%X) > VM(%X) @ %#X",
self.ept, vm.ept, run.file_offset)
for vmcs in vm.vmcss:
# Skip VMCS that we already validated
if vm.is_valid_vmcs(vmcs):
continue
# This step makes sure the VMCS is mapped in the
# Level1 guest physical memory (us).
if (run.file_offset <= vmcs.obj_offset and
vmcs.obj_offset < run.file_offset + run.length):
# Now we need to validate the VMCS under our context.
# For this we need to fix the VMCS AS and its offset.
vm.set_parent(self)
vmcs_stored_vm = vmcs.obj_vm
vmcs_stored_offset = vmcs.obj_offset
# Change the VMCS to be mapped in this VM's physical AS.
vmcs.obj_vm = self.physical_address_space
# The new offset is the run.start + the offset within
# the physical page. We need to do this when we're
# dealing with large/huge pages.
# Note that run.start here really means the physical
# address of the L1 guest. run.file_offset means the
# physical address of the base AS (the host).
                        vmcs.obj_offset = (run.start +
                                           (vmcs.obj_offset - run.file_offset))
if vm.validate_vmcs(vmcs):
# This steps validates that the VMCS is mapped in
# the Level1 guest hypervisor AS.
self.virtual_machines.update([vm])
else:
# Reset the VMCS settings
vmcs.obj_vm = vmcs_stored_vm
vmcs.obj_offset = vmcs_stored_offset
# If any of the VMs was found to be nested, remove it from the vm_list
for vm in self.virtual_machines:
try:
vm_list.remove(vm)
except ValueError:
pass
def _reset_validation_state(self, vmcs):
"""Invalidates the vmcs validation cache entry for vmcs."""
self.vmcs_validation.pop(vmcs, None)
def __str__(self):
return "VirtualMachine(Hypervisor=%#X, EPT=%#X)" % (
self.host_rip, self.ept)
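# Sketch of driving a detected guest (assumes `vm` is a validated
# VirtualMachine produced by the grouping logic in VmScan below):
#
#   vm.RunPlugin("pslist")          # or: vm.GetSession().RunPlugin("pslist")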
class VmScan(plugin.PhysicalASMixin,
plugin.TypedProfileCommand, plugin.Command):
"""Scan the physical memory attempting to find hypervisors.
Once EPT values are found, you can use them to inspect virtual machines
with any of the rekall modules by using the --ept parameter and
specifying the guest virtual machine profile.
    Supports the detection of the following virtualization technologies:
* Intel VT-X with EPT. Microarchitectures:
+ Westmere
+ Nehalem
+ Sandybridge
+ Ivy Bridge
+ Haswell
* Intel VT-X without EPT (unsupported page translation in rekall).
+ Penryn
For the specific processor models that support EPT, please check:
http://ark.intel.com/products/virtualizationtechnology.
"""
__name = "vmscan"
__args = [
dict(name="quick", type="Boolean",
help="Perform quick VM detection."),
dict(name="no_nested", type="Boolean",
help="Don't do nested VM detection."),
dict(name="offset", type="IntParser", default=0,
help="Offset in the physical image to start the scan."),
dict(name="show_all", default=False, type="Boolean",
help="Also show VMs that failed validation."),
dict(name="image_is_guest", default=False, type="Boolean",
help="The image is for a guest VM, not the host."),
dict(name="no_validation", default=False, type="Boolean",
help="[DEBUG SETTING] Disable validation of VMs.")
]
def __init__(self, *args, **kwargs):
super(VmScan, self).__init__(*args, **kwargs)
        if self.plugin_args.no_validation:
self.plugin_args.show_all = True
def get_vms(self):
"""Finds virtual machines in physical memory and returns a list of them.
"""
all_vmcs = VMCSScanner(
address_space=self.physical_address_space,
session=self.session,
profile=obj.NoneObject).scan(
offset=self.plugin_args.offset,
end=self.physical_address_space.end())
host_vms = []
nested_vms = []
# == HOST VM validation
# Group the host VMCSs by (HOST_RIP, EPTP) and validate if requested.
# You could use (HOST_RIP, HOST_CR3), but VMWare 10.X uses a different
# HOST_CR3 per core. The EPTP, however, is the same and hardly any
# virtualization product would ever want to have different EPTP's per
# core because more page tables would have to be maintained for the
# same VM.
for host_rip, rip_vmcs_list in groupby(
sorted(all_vmcs, key=lambda x: long(x.HOST_RIP)),
lambda x: long(x.HOST_RIP)):
sorted_rip_vmcs_list = sorted(
rip_vmcs_list, key=lambda x: long(x.m("EPT_POINTER_FULL")))
for ept, rip_ept_vmcs_list in groupby(
sorted_rip_vmcs_list,
lambda x: long(x.m("EPT_POINTER_FULL"))):
vm = VirtualMachine(host_rip=host_rip, ept=ept,
session=self.session)
for vmcs in rip_ept_vmcs_list:
try:
# If a VMCS is nested we cannot do validation at this
# step unless the memory image is for a guest VM or the
# physical address_space is a VTxPagedMemory. The
# physical AS is a VTxPagedMemory when you specify the
# --ept parameter on the command line.
if vmcs.IS_NESTED:
if (self.plugin_args.image_is_guest or
self.physical_address_space.metadata(
"ept")):
vm.add_vmcs(
vmcs,
                                validate=not self.plugin_args.no_validation)
else:
vm.add_vmcs(vmcs, validate=False)
else:
vm.add_vmcs(
vmcs,
                            validate=not self.plugin_args.no_validation)
if vm.is_valid_vmcs(vmcs):
if self.plugin_args.quick:
break
except UnrelatedVmcsError:
# This may happen when we analyze live memory. When
# the HOST_RIP/EPT that we grouped with has changed
# between finding it and adding it to a vm, add_vmcs
# may raise an UnrelatedVmcsError.
# Not much we can do other than skipping this VMCS.
continue
# Discard empty VMs, which can happen if we skipped vmcss.
if not vm.vmcss:
continue
# We need to split nested and host VMs here. However, we
# cannot use the is_nested method of VirtualMachine, because the
# potentially nested VMs aren't technically nested yet
# (i.e: don't have a parent). So we resort to checking if
# all the VMCSs are of nested-type.
may_be_nested = all([v.IS_NESTED for v in vm.vmcss])
if may_be_nested and not vm.is_valid:
# Only add as nested VMs ones that haven't been validated
                # yet. This covers the case where image_is_guest is True
# and they were validated as hosts.
nested_vms.append(vm)
else:
host_vms.append(vm)
if self.plugin_args.no_nested:
return host_vms
# == NESTED VM validation
# Only 1 level of nesting supported at the moment.
#
# TODO: Detect turtles-type VMCSs and relate them to the proper VM.
# https://www.usenix.org/event/osdi10/tech/full_papers/Ben-Yehuda.pdf
#
# These should show up as another valid VM.
        if not self.plugin_args.no_validation:
candidate_hosts = [vm for vm in host_vms if vm.is_valid]
else:
candidate_hosts = []
# This step validates nested VMs. We try all candidate nested vms
# against all candidate hosts.
for candidate_host_vm in candidate_hosts:
candidate_host_vm.add_nested_vms(
nested_vms, validate_all=not self.plugin_args.quick)
# Add all remaining VMs that we weren't able to guess the hierarchy of
# to the output vm list.
host_vms.extend(nested_vms)
return host_vms
def render(self, renderer=None):
renderer.table_header([
dict(name="Description", type="TreeNode", max_depth=5, child=dict(
type="VirtualizationNode", style="light",
quick=self.plugin_args.quick)),
("Type", "type", ">20s"),
("Valid", "valid", ">8s"),
("EPT", "ept", "s")
])
virtual_machines = self.get_vms()
# At this point the hierarchy has been discovered.
for vm in virtual_machines:
# Skip invalid VMs.
if not self.plugin_args.show_all and not vm.is_valid:
continue
self.render_vm(renderer, vm, indent_level=0)
if self.plugin_args.verbosity > 2:
for vm in virtual_machines:
for vmcs in vm.vmcss:
if (not self.plugin_args.show_all and
not vm.is_valid_vmcs(vmcs)):
continue
renderer.section("VMCS @ %#x" % vmcs.obj_offset)
renderer.table_header([("Details", "details", "s")])
self.session.plugins.p(vmcs).render(renderer)
for nested_vm in vm.virtual_machines:
for vmcs in nested_vm.vmcss:
if (not self.plugin_args.show_all and
not vm.is_valid_vmcs(vmcs)):
continue
renderer.section("VMCS @ %#x" % vmcs.obj_offset)
renderer.table_header([("Details", "details", "s")])
self.session.plugins.p(vmcs).render(renderer)
def render_vm(self, renderer, vm, indent_level=0):
vm_ept = ','.join(["0x%X" % e for e in vm.ept_list])
renderer.table_row(vm, 'VM', vm.is_valid, vm_ept, depth=indent_level)
if vm.is_valid and isinstance(
self.session, session_module.InteractiveSession):
self.session.session_list.append(vm.GetSession())
if self.plugin_args.verbosity > 1:
for vmcs in sorted(vm.vmcss,
key=lambda x: x.m("VPID")):
if not self.plugin_args.show_all and not vm.is_valid_vmcs(vmcs):
continue
valid = vm.is_valid_vmcs(vmcs)
renderer.table_row(
vmcs,
vmcs.obj_name, valid, '', depth=indent_level+1)
for nested_vm in vm.virtual_machines:
if not self.plugin_args.show_all and not nested_vm.is_valid:
continue
self.render_vm(renderer, nested_vm, indent_level=indent_level+1)
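# Typical invocations (a sketch: the image path is hypothetical, the flag
# names match the __args declared above, and --ept is the session parameter
# the class docstring refers to):
#
#   rekal -f memory.raw vmscan --show_all
#   rekal -f memory.raw --ept 0x1DF30101E pslist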
| rlugojr/rekall | rekall-core/rekall/plugins/hypervisors.py | Python | gpl-2.0 | 27,401 |
# -*- coding: utf-8 -*-
"""
This module is used for testing the functions within the pyhpeimc.plat.netassets module.
"""
from unittest import TestCase
from nose.plugins.skip import SkipTest
from pyhpeimc.plat.netassets import *
from test_machine import *
##### Test get_dev_asset_details function for multiple vendors
### Switches
#CW3_Switch
class TestGet_dev_asset_detailsCW3_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if CW3_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW3_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if CW3_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW3_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#CW5_Switch
class TestGet_dev_asset_detailsCW5_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if CW5_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW5_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if CW5_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW5_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#CW7_Switch
class TestGet_dev_asset_detailsCW7_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if CW7_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW7_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if CW7_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(CW7_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#Cisco_Switch
class TestGet_dev_asset_detailsCisco_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if Cisco_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Cisco_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Cisco_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Cisco_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#Juniper_Switch
class TestGet_dev_asset_detailsJuniper_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if Juniper_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Juniper_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Juniper_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Juniper_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#Arista_Switch
class TestGet_dev_asset_detailsArista_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if Arista_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Arista_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Arista_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(Arista_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#ArubaOS_Switch (Formerly Provision)
class TestGet_dev_asset_detailsArubaOS_Switch(TestCase):
def test_get_dev_asset_details_type(self):
if ArubaOS_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(ArubaOS_Switch, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if ArubaOS_Switch is None:
raise SkipTest
single_asset = get_dev_asset_details(ArubaOS_Switch, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
###Routers
#Cisco_Router
class TestGet_dev_asset_detailsCisco_Router(TestCase):
def test_get_dev_asset_details_type(self):
if Cisco_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(Cisco_Router, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Cisco_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(Cisco_Router, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#CW5_Router
class TestGet_dev_asset_detailsCW5_Router(TestCase):
def test_get_dev_asset_details_type(self):
if CW5_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(CW5_Router, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if CW5_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(CW5_Router, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#Juniper_Router (SRV)
class TestGet_dev_asset_detailsJuniper_Router(TestCase):
def test_get_dev_asset_details_type(self):
if Juniper_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(Juniper_Router, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Juniper_Router is None:
raise SkipTest
single_asset = get_dev_asset_details(Juniper_Router, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
####Servers
#Windows_Server
class TestGet_dev_asset_detailsWindows_Server(TestCase):
def test_get_dev_asset_details_type(self):
if Windows_Server is None:
raise SkipTest
single_asset = get_dev_asset_details(Windows_Server, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Windows_Server is None:
raise SkipTest
single_asset = get_dev_asset_details(Windows_Server, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#Linux_Server
class TestGet_dev_asset_detailsLinux_Server(TestCase):
def test_get_dev_asset_details_type(self):
if Linux_Server is None:
raise SkipTest
single_asset = get_dev_asset_details(Linux_Server, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if Linux_Server is None:
raise SkipTest
single_asset = get_dev_asset_details(Linux_Server, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
###Hypervisors
#ESX
class TestGet_dev_asset_detailsESX(TestCase):
def test_get_dev_asset_details_type(self):
if ESX is None:
raise SkipTest
single_asset = get_dev_asset_details(ESX, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if ESX is None:
raise SkipTest
single_asset = get_dev_asset_details(ESX, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 27)  # TODO: length changed from 28 to 27; needs investigation
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
# self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
#HyperV
class TestGet_dev_asset_detailsHyperV(TestCase):
def test_get_dev_asset_details_type(self):
if HyperV is None:
raise SkipTest
single_asset = get_dev_asset_details(HyperV, auth.creds, auth.url)
self.assertIs(type(single_asset), list)
def test_get_dev_asset_details_content(self):
if HyperV is None:
raise SkipTest
single_asset = get_dev_asset_details(HyperV, auth.creds, auth.url)
        self.assertEqual(len(single_asset[0]), 28)
self.assertIn('asset', single_asset[0])
self.assertIn('phyClass', single_asset[0])
self.assertIn('beginDate', single_asset[0])
self.assertIn('devId', single_asset[0])
self.assertIn('hardVersion', single_asset[0])
self.assertIn('isFRU', single_asset[0])
self.assertIn('deviceIp', single_asset[0])
self.assertIn('cleiCode', single_asset[0])
self.assertIn('physicalFlag', single_asset[0])
self.assertIn('mfgName', single_asset[0])
self.assertIn('firmwareVersion', single_asset[0])
self.assertIn('buildInfo', single_asset[0])
self.assertIn('relPos', single_asset[0])
self.assertIn('boardNum', single_asset[0])
self.assertIn('alias', single_asset[0])
self.assertIn('deviceName', single_asset[0])
self.assertIn('softVersion', single_asset[0])
self.assertIn('bom', single_asset[0])
self.assertIn('name', single_asset[0])
self.assertIn('containedIn', single_asset[0])
self.assertIn('assetNumber', single_asset[0])
self.assertIn('model', single_asset[0])
self.assertIn('vendorType', single_asset[0])
self.assertIn('serialNum', single_asset[0])
self.assertIn('remark', single_asset[0])
self.assertIn('desc', single_asset[0])
self.assertIn('phyIndex', single_asset[0])
self.assertIn('serverDate', single_asset[0])
class TestGet_dev_asset_details_doesnt_exist(TestCase):
def test_get_dev_asset_details_doesnt_exist(self):
if DoesntExist is None:
raise SkipTest
asset_doesnt_exist = get_dev_asset_details(DoesntExist, auth.creds, auth.url)
self.assertIs(type(asset_doesnt_exist), int)
self.assertEqual(asset_doesnt_exist, 403)
##### Test get_dev_asset_details_all function
# TODO: the assertions commented out below belong to a failing test; investigate before re-enabling.
class TestGet_dev_asset_details_all(TestCase):
def test_get_dev_asset_details_all_type(self):
all_assets = get_dev_asset_details_all(auth.creds, auth.url)
self.assertIs(type(all_assets), list)
def test_get_dev_asset_details_all_content(self):
all_assets = get_dev_asset_details_all(auth.creds, auth.url)
#self.assertIs(len(all_assets[0]), 28)
self.assertIn('asset', all_assets[0])
self.assertIn('phyClass', all_assets[0])
#self.assertIn('beginDate', all_assets[0])
self.assertIn('devId', all_assets[0])
self.assertIn('hardVersion', all_assets[0])
self.assertIn('isFRU', all_assets[0])
self.assertIn('deviceIp', all_assets[0])
self.assertIn('cleiCode', all_assets[0])
self.assertIn('physicalFlag', all_assets[0])
self.assertIn('mfgName', all_assets[0])
self.assertIn('firmwareVersion', all_assets[0])
self.assertIn('buildInfo', all_assets[0])
self.assertIn('relPos', all_assets[0])
self.assertIn('boardNum', all_assets[0])
self.assertIn('alias', all_assets[0])
self.assertIn('deviceName', all_assets[0])
self.assertIn('softVersion', all_assets[0])
self.assertIn('bom', all_assets[0])
self.assertIn('name', all_assets[0])
self.assertIn('containedIn', all_assets[0])
self.assertIn('assetNumber', all_assets[0])
self.assertIn('model', all_assets[0])
self.assertIn('vendorType', all_assets[0])
self.assertIn('serialNum', all_assets[0])
self.assertIn('remark', all_assets[0])
self.assertIn('desc', all_assets[0])
self.assertIn('phyIndex', all_assets[0])
self.assertIn('serverDate', all_assets[0])
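# These tests are meant to run against a live HPE IMC server: `auth` and the
# per-device constants such as CW3_Switch come from test_machine, and each
# test skips itself (nose SkipTest) when its device type is absent from the
# lab. A typical run might look like (a sketch; the path is assumed):
#
#   nosetests tests/test_pyhpeimc_plat_netassets.py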
| netmanchris/PYHPEIMC | tests/test_pyhpeimc_plat_netassets.py | Python | apache-2.0 | 30,488 |
import os
import shutil
import tempfile
import unittest
from mock import Mock, patch, call, MagicMock
from pulp.devel.unit.util import touch
from pulp.plugins.conduits.repo_publish import RepoPublishConduit
from pulp.plugins.model import Repository
from pulp.plugins.util.publish_step import PublishStep
from pulp_openstack.plugins.distributors import glance_publish_steps
class TestPublishImagesStep(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.working_directory = os.path.join(self.temp_dir, 'working')
self.content_directory = os.path.join(self.temp_dir, 'content')
os.makedirs(self.working_directory)
os.makedirs(self.content_directory)
repo = Repository('foo_repo_id', working_dir=self.working_directory)
config = Mock()
config.get_config.return_value = "mocked-config-value"
conduit = RepoPublishConduit(repo.id, 'foo_repo')
self.parent = PublishStep('test-step', repo, conduit, config)
def tearDown(self):
shutil.rmtree(self.working_directory)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_process_unit(self, mock_ou):
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
self.assertEquals(step.images_processed, [])
step.parent = self.parent
fake_image_filename = 'fake-zero-byte-image.qcow2'
touch(os.path.join(self.content_directory, fake_image_filename))
unit = Mock(unit_key={'image_checksum': 'd41d8cd98f00b204e9800998ecf8427e'},
metadata={'image_size': 100, 'image_name': 'fake-image-name'},
storage_path=os.path.join(self.content_directory, fake_image_filename))
step.get_working_dir = Mock(return_value=self.working_directory)
step.process_unit(unit)
expected_calls = [call().find_image('foo_repo_id', 'd41d8cd98f00b204e9800998ecf8427e'),
call().create_image(os.path.join(self.content_directory,
fake_image_filename),
'foo_repo_id',
checksum='d41d8cd98f00b204e9800998ecf8427e',
name='fake-image-name', size=100)]
mock_ou.assert_has_calls(expected_calls, any_order=True)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_process_unit_already_exists(self, mock_openstackutil):
mock_ou = mock_openstackutil.return_value
mock_ou.find_image.return_value = iter(['an-image'])
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
step.parent = self.parent
fake_image_filename = 'fake-zero-byte-image.qcow2'
touch(os.path.join(self.content_directory, fake_image_filename))
unit = Mock(unit_key={'image_checksum': 'd41d8cd98f00b204e9800998ecf8427e'},
metadata={'image_size': 100, 'image_name': 'fake-image-name'},
storage_path=os.path.join(self.content_directory, fake_image_filename))
step.get_working_dir = Mock(return_value=self.working_directory)
step.process_unit(unit)
unexpected_call = call().create_image(os.path.join(self.content_directory,
fake_image_filename),
'foo_repo_id',
checksum='d41d8cd98f00b204e9800998ecf8427e',
name='fake-image-name', size=100)
# make sure "create_image" was not called
self.assertTrue(unexpected_call not in mock_ou.mock_calls)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_process_unit_found_multiple_images(self, mock_openstackutil):
mock_ou = mock_openstackutil.return_value
mock_ou.find_image.return_value = iter(['an-image', 'image2'])
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
step.parent = self.parent
fake_image_filename = 'fake-zero-byte-image.qcow2'
touch(os.path.join(self.content_directory, fake_image_filename))
unit = Mock(unit_key={'image_checksum': 'd41d8cd98f00b204e9800998ecf8427e'},
metadata={'image_size': 100, 'image_name': 'fake-image-name'},
storage_path=os.path.join(self.content_directory, fake_image_filename))
step.get_working_dir = Mock(return_value=self.working_directory)
try:
step.process_unit(unit)
self.assertTrue(False, "RuntimeError not thrown")
except RuntimeError:
pass
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_finalize_no_deletes(self, mock_ou):
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
self.assertEquals(step.images_processed, [])
step.parent = self.parent
step.finalize()
expected_calls = [call().find_repo_images('foo_repo_id')]
mock_ou.assert_has_calls(expected_calls, any_order=True)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_finalize_with_deletes(self, mock_openstackutil):
mock_ou = mock_openstackutil.return_value
mock_image = MagicMock()
mock_image.id = "mock_image_id"
mock_ou.find_repo_images.return_value = iter([mock_image])
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
self.assertEquals(step.images_processed, [])
step.parent = self.parent
step.finalize()
        self.assertEqual(mock_ou.delete_image.call_count, 1)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_finalize_no_deletes_with_images(self, mock_openstackutil):
mock_ou = mock_openstackutil.return_value
mock_image = MagicMock()
mock_image.id = "mock_image_id"
mock_image.checksum = "mock_image_checksum"
mock_ou.find_repo_images.return_value = iter([mock_image])
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
self.assertEquals(step.images_processed, [])
step.parent = self.parent
step.images_processed = [mock_image.checksum]
step.finalize()
self.assertEquals(mock_ou.delete_image.called, 0)
@patch('pulp_openstack.common.openstack_utils.OpenstackUtils')
def test_finalize_bad_push(self, mock_openstackutil):
"""
Tests that if an image didn't make it from pulp to glance for some
reason, we do not perform any deletions at all for that repo.
"""
mock_ou = mock_openstackutil.return_value
mock_image = MagicMock()
mock_image.id = "mock_image_id"
mock_image.checksum = "mock_image_checksum"
unpushed_mock_image = MagicMock()
unpushed_mock_image.id = "unpushed_mock_image_id"
unpushed_mock_image.checksum = "unpushed_mock_image_id_checksum"
mock_ou.find_repo_images.return_value = iter([mock_image])
mock_config = Mock()
mock_config.get_config.return_value = "mocked-config-value"
step = glance_publish_steps.PublishImagesStep()
step.config = mock_config
step.initialize()
self.assertEquals(step.images_processed, [])
step.parent = self.parent
step.images_processed = [mock_image.checksum, unpushed_mock_image.checksum]
try:
step.finalize()
self.assertTrue(False, "finalize should have thrown RuntimeError")
except RuntimeError:
self.assertEquals(mock_ou.delete_image.called, 0)
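# Contract exercised by the tests above (summarized from the assertions, not
# quoted from the step's own documentation): process_unit uploads a unit to
# Glance only when find_image returns no match, raises if a checksum matches
# more than one image, and finalize deletes repo images that were not part of
# this publish -- unless a processed image failed to appear in Glance, in
# which case it raises instead of deleting anything.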
class TestGlancePublisher(unittest.TestCase):
def setUp(self):
self.working_directory = tempfile.mkdtemp()
self.master_dir = os.path.join(self.working_directory, 'master')
self.working_temp = os.path.join(self.working_directory, 'work')
self.repo = Mock(id='foo', working_dir=self.working_temp)
def tearDown(self):
shutil.rmtree(self.working_directory)
@patch('pulp_openstack.plugins.distributors.glance_publish_steps.PublishImagesStep')
def test_init(self, mock_glance_publish_step):
mock_conduit = Mock()
mock_config = {}
publisher = glance_publish_steps.GlancePublisher(self.repo, mock_conduit, mock_config)
self.assertEquals(publisher.children, [mock_glance_publish_step.return_value])
| pulp/pulp_openstack | plugins/test/unit/plugins/distributors/test_glance_publish_steps.py | Python | gpl-2.0 | 9,368 |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.cache import cache
from model_mommy import mommy
from fabric_bolt.projects import models
from . import backend
User = get_user_model()
class BackendTests(TestCase):
def test_build_command_injection(self):
deployment = mommy.make(models.Deployment, task__name='test_env')
cache.delete_many(['project_{}_fabfile_tasks'.format(deployment.stage.project_id),
'project_{}_fabfile_path'.format(deployment.stage.project_id)])
configuration = mommy.make(models.Configuration, key='foo=bar -i /path/to/keyfile --set foo2', value='bar')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
fabfile_path, active_loc = backend.get_fabfile_path(deployment.stage.project)
self.assertEqual(
command,
'fab test_env --set "foo\\=bar -i /path/to/keyfile --set foo2=bar" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
configuration = mommy.make(models.Configuration, key='dummy_key', value='dummy_value')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
self.assertEqual(
command,
'fab test_env --set "foo\=bar -i /path/to/keyfile --set foo2=bar,dummy_key=dummy_value" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
deployment.stage.configuration_set.clear()
configuration = mommy.make(models.Configuration, key='dummy_key=test" | ls #', value='dummy_value')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
self.assertEqual(
command,
'fab test_env --set "dummy_key\=test\\" | ls #=dummy_value" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
deployment.stage.configuration_set.clear()
configuration = mommy.make(models.Configuration, key='dummy_key', value='dummy_value,x=y')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
self.assertEqual(
command,
'fab test_env --set "dummy_key=dummy_value\,x\=y" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
deployment.stage.configuration_set.clear()
configuration = mommy.make(models.Configuration, key='dummy_key=blah,x', value='dummy_value')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
self.assertEqual(
command,
'fab test_env --set "dummy_key\=blah\,x=dummy_value" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
deployment.stage.configuration_set.clear()
configuration = mommy.make(models.Configuration, key='key_filename', value='my_ssh_key')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
self.assertEqual(
command,
'fab test_env -i my_ssh_key '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
def test_build_command_with_args(self):
deployment = mommy.make(models.Deployment, task__name='test_env')
configuration = mommy.make(models.Configuration, key='arg', value='arg_value', task_argument=True,
task_name='test_env')
deployment.stage.configuration_set.add(configuration)
command = backend.build_command(deployment.stage.project, deployment, {})
fabfile_path, active_loc = backend.get_fabfile_path(deployment.stage.project)
self.assertEqual(
command,
'fab test_env:arg="arg_value" '
'--abort-on-prompts --fabfile={}'.format(fabfile_path)
)
def test_parse_task_details(self):
output = """Displaying detailed information for task 'test_env':
No docstring provided
Arguments: arg, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22, arg23, arg24, arg25, arg26, arg27, arg28, arg29, arg30
"""
details = backend.parse_task_details('test_env', output)
self.assertEqual(len(details), 3)
self.assertEqual(details[0], 'test_env')
self.assertEqual(details[1], None)
self.assertListEqual(details[2], ['arg', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9', 'arg10', 'arg11', 'arg12', 'arg13', 'arg14', 'arg15', 'arg16', 'arg17', 'arg18', 'arg19', 'arg20', 'arg21', 'arg22', 'arg23', 'arg24', 'arg25', 'arg26', 'arg27', 'arg28', 'arg29', 'arg30'])
output = """Displaying detailed information for task 'do_nothing':
Awesome docstring
Arguments: test='default'
"""
details = backend.parse_task_details('do_nothing', output)
self.assertEqual(len(details), 3)
self.assertEqual(details[0], 'do_nothing')
self.assertEqual(details[1], 'Awesome docstring')
self.assertEqual(len(details[2]), 1)
self.assertIsInstance(details[2][0], tuple)
self.assertTupleEqual(details[2][0], ('test', 'default'))
output = """Displaying detailed information for task 'do_nothing':
Awesome docstring
Arguments: test='default', test2
"""
details = backend.parse_task_details('do_nothing', output)
self.assertEqual(len(details), 3)
self.assertEqual(details[0], 'do_nothing')
self.assertEqual(details[1], 'Awesome docstring')
self.assertEqual(len(details[2]), 2)
self.assertIsInstance(details[2][0], tuple)
self.assertTupleEqual(details[2][0], ('test', 'default'))
self.assertIsInstance(details[2][1], str)
self.assertEqual(details[2][1], 'test2')
output = """Displaying detailed information for task 's':
Set the Site that we're deploying or bootstrapping. For example s:production
:param site_alias:
:return:
Arguments: site_alias
"""
details = backend.parse_task_details('s', output)
self.assertEqual(len(details), 3)
self.assertEqual(details[0], 's')
self.assertEqual(details[1], "Set the Site that we're deploying or bootstrapping. For example s:production\n:param site_alias:\n:return:")
self.assertEqual(len(details[2]), 1)
self.assertIsInstance(details[2][0], str)
self.assertEqual(details[2][0], 'site_alias')
output = """Displaying detailed information for task 'deploy':
Pulls code, updates pip, syncs, migrates, collects static, resets permissions and reloads supervisor and nginx
:param hard:
:return:
Arguments: hard=False
"""
details = backend.parse_task_details('deploy', output)
self.assertEqual(len(details), 3)
self.assertEqual(details[0], 'deploy')
self.assertEqual(details[1], "Pulls code, updates pip, syncs, migrates, collects static, resets permissions and reloads supervisor and nginx\n:param hard:\n:return:")
self.assertEqual(len(details[2]), 1)
self.assertIsInstance(details[2][0], str)
self.assertEqual(details[2][0], 'hard') | leominov/fabric-bolt | fabric_bolt/task_runners/tests.py | Python | mit | 7,768 |
#!/usr/bin/env python
import sys
import os
import Pipeline.settings.BiotoolsSettings as BiotoolsSettings
import DPyGetOpt
from Pipeline.core.PipelineTemplate import PipelineTemplate
import Pipeline.core.PipelineUtil as PipelineUtil
from Pipeline.core.PipelineError import PipelineError
from Pipeline.core.PipelineClusterJob import PipelineClusterJob
#http://ipython.org/ipython-doc/rel-0.10.2/html/api/generated/IPython.DPyGetOpt.html
#http://www.artima.com/weblogs/viewpost.jsp?thread=4829
class Usage(Exception):
def __init__(self, msg=None, err=True):
#msg is an error message to post before the usage info
usage="Usage: %s (options)\n" % sys.argv[0]
usage +="Options:\n"
usage +="\t--template |-T=string : set template name\n"
usage +="\t--clearsuffixes|-C : set flag to force suffix reset post-module\n"
usage +="\t--cross |-c : set flag to inform SJM generator that this is a crossjob\n"
usage +="\t (depends on pairs of input files from different samples)\n"
usage +="\t--variable |-V=string : add pipeline variable to be used during variable replacement (multi-use)\n"
usage +="\t--suffix |-S=string : set suffix to be used post-module\n"
usage +="\t--subjob |-s=string : add a subjob to template\n"
usage +="for help use --help\n"
if msg is not None:
self.msg = msg.strip() +"\n" + usage
else:
self.msg = usage
self.exit_code=None
        if err:
            self.exit_code = 2
        else:
            self.exit_code = 0
def main(argv=None):
if argv is None:
argv = sys.argv
try:
#try to parse option arguments
try:
opts=[]
opts.append("variable|V=s@")
opts.append("clearsuffixes|C")
opts.append("suffix|S=s")
opts.append("template|T=s")
opts.append("subjob|J=s@")
opts.append("cross|c")
opts.append("help|h")
opt_parser=DPyGetOpt.DPyGetOpt()
opt_parser.setIgnoreCase(False)
opt_parser.setAllowAbbreviations(False)
opt_parser.setPosixCompliance(True)
opt_parser.parseConfiguration(opts)
opt_parser.processArguments(sys.argv)
pipeline_vars=opt_parser.valueForOption("variable")
pipeline_clearsuffixes=bool(opt_parser.valueForOption("clearsuffixes"))
pipeline_crossjob=bool(opt_parser.valueForOption("cross"))
pipeline_suffix=opt_parser.valueForOption("suffix")
pipeline_templateName=opt_parser.valueForOption("template")
pipeline_subjobs=opt_parser.valueForOption("subjob")
help_flag=bool(opt_parser.valueForOption("help"))
if help_flag:
raise Usage(err=False)
argv=opt_parser.freeValues
print("defined vars:")
if pipeline_vars is None:
print("\t(No vars defined)")
else:
for var in pipeline_vars:
print("\t%s" % var)
if pipeline_subjobs is None:
raise Usage("Must define at least one subjob",err=True)
else:
print("defined subjob commands:")
for job in pipeline_subjobs:
print("\t%s" % job)
print("suffixes cleared after template:")
print("\t%s" % pipeline_clearsuffixes)
print("Is a CrossJob:")
print("\t%s" % pipeline_crossjob)
if pipeline_suffix is None:
raise Usage("Must Specify a template suffix",err=True)
print("Template Suffix:")
print("\t%s" % pipeline_suffix)
if pipeline_templateName is None:
raise Usage("Must Specify a template name",err=True)
print("Template Name:")
print("\t%s" % pipeline_templateName)
#TODO method stub
            temp=PipelineTemplate()
            temp.suffix=pipeline_suffix
            temp.clearsuffixes=pipeline_clearsuffixes
            temp.isCrossJob=pipeline_crossjob
            temp.name=pipeline_templateName
            parseVars(temp,pipeline_vars)
            parseSubJobs(temp,pipeline_subjobs)
#temp.ClusterJobs=[];
#temp.vars={};
#temp.var_keys=[];
temp.writeTemplate()
except DPyGetOpt.ArgumentError as DPyGetOptArgErr:
raise Usage("DPyGetOptArgErr: " + DPyGetOptArgErr.__str__())
except DPyGetOpt.SpecificationError as DPyGetOptSpecErr:
raise Usage("DPyGetOptSpecErr: " + DPyGetOptSpecErr.__str__())
except DPyGetOpt.TerminationError as DPyGetOptTermErr:
raise Usage("DPyGetOptTermErr: " + DPyGetOptTermErr.__str__())
except DPyGetOpt.Error as DPyGetOptErr:
raise Usage("DPyGetOptErr: " + DPyGetOptErr.__str__())
except PipelineError as pipe_err:
            sys.stderr.write(pipe_err.msg)
            return -1
        print("PROGRAM EXECUTION REACHED END OF MAIN")
        return 0
except Usage as err:
sys.stderr.write(err.msg)
return err.exit_code
def parseVars(template,Vars):
    if template is None:
        raise PipelineError("[PipelineTemplateGenerator.parseVars] template object is None")
    if Vars is not None:
        #print("Vars: %s" % Vars)
        for Var in Vars:
            eqsplit=Var.split("=")
            if (len(eqsplit)!=2):
                raise PipelineError("[PipelineTemplateGenerator.parseVars] Incorrect syntax for var definition: "+ Var)
            if eqsplit[0] in template.vars:
                raise PipelineError("[PipelineTemplateGenerator.parseVars] defined same var twice: "+ eqsplit[0])
            template.vars[eqsplit[0]]=eqsplit[1]
            template.var_keys.append(eqsplit[0])
# print("var keys: %s" % template.var_keys)
# print("vars: %s" % template.vars)
def parseSubJobs(template,subjobs):
    if template is None:
        raise PipelineError("[PipelineTemplateGenerator.parseSubJobs] template object is None")
    if subjobs is None:
        raise PipelineError("[PipelineTemplateGenerator.parseSubJobs] No subjobs provided")
    for subjobopt in subjobs:
        clusterjob=template.getNewClusterJob()
parseSubJob(subjobopt,clusterjob)
def parseSubJob(subjobopt,clusterjob):
    #subjobvars={}
    commasplit=subjobopt.split(",")
for commaItem in commasplit:
eqsplit=commaItem.split("=")
# print("split on equal sign: %s" % eqsplit)
        if (len(eqsplit)!=2):
            raise PipelineError("[PipelineTemplateGenerator.parseSubJob] invalid argument syntax! should have 2 elements separated by '=', have: %d" % len(eqsplit))
attrib_name=str(eqsplit[0].strip())
attrib_val=str(eqsplit[1])
# print("attrib_name:")
# print(attrib_name)
# print("attrib_val:")
# print(attrib_val)
if attrib_name == "order_after":
# print("found order_after!!!");
# print("parsing: " + attrib_val);
if ':' in attrib_val:
arr=attrib_val.split(":");
# print("split order after: " + arr);
clusterjob.order_after.append(arr);
else:
# print("order after: " + attrib_val);
clusterjob.order_after.append(attrib_val);
elif attrib_name == "cmd":
# print("found cmd!!!");
# print("split cmd: " + attrib_val);
clusterjob.cmd.append(attrib_val);
else:
# print("found " + attrib_name + " !!!")
setattr(clusterjob, attrib_name, attrib_val)
if clusterjob.module is None:
clusterjob.module=BiotoolsSettings.getValue("MODULEFILE")
if clusterjob.directory is None:
clusterjob.directory=BiotoolsSettings.getValue("CURDIR")
if clusterjob.queue is None:
clusterjob.queue=BiotoolsSettings.getValue("JOBQUEUE")
if __name__ == "__main__":
sys.exit(main()) | kotoroshinoto/Cluster_SimpleJob_Generator | pybin/Pipeline/commands/PipelineTemplateGenerator.py | Python | unlicense | 8,154 |
class VNXObject(object):
def __init__(self):
self._props = {}
def __setattr__(self, key, value):
try:
super(VNXObject, self).__setattr__(key, value)
except AttributeError:
self._props[key] = value
# def __getattr__(self, item):
# try:
# super(VNXObject, self).__getattribute__(item)
# except AttributeError:
# # v = self.__props[item]
# # print "__get__: ", v
# # if callable(v):
# # return v()
# # return v
# raise
class VNXObject1(object):
props = {}
def __setattr__(self, key, value):
self.props[key] = value
def __getattr__(self, item):
try:
super(VNXObject1, self).__getattr__(item)
except AttributeError:
v = self.props[item]
print "__get__: ", v
if callable(v):
return v()
return v
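# Illustrative note (not part of the original experiment): VNXObject1 stores
# writes in the class-level 'props' dict and routes failed attribute reads
# through __getattr__, so a stored callable is invoked on access, e.g.:
#   obj = VNXObject1(); obj.lun = lambda: 'LUN_1'; obj.lun  # -> 'LUN_1'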
import mock
#
# obj = VNXObject()
#
# obj.test = '111'
# print(obj.test)
#
# setattr(obj, 'hello', "hello, peter")
# print(obj.hello)
#
# setattr(obj, 'mock', mock.PropertyMock(return_value='hello,mock'))
# print(obj.mock)
vnx1 = VNXObject1()
vnx1.test = 'hello'
print(vnx1.test) | peter-wangxu/python_play | test/mock_test/test_attr.py | Python | apache-2.0 | 1,244 |
#!/usr/bin/env python
# Copyright (C) 2006-2019 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import numpy as np
from math import *
from essentia_test import *
from essentia import array as esarr
class TestSNR(TestCase):
def testZero(self):
self.assertEqual(SNR()(esarr(np.zeros(512)))[1], -np.inf)
def testOnes(self):
self.assertEqual(SNR()(esarr(np.ones(512)))[1], np.inf)
def testInvalidParam(self):
self.assertConfigureFails(SNR(), {'sampleRate': -1})
self.assertConfigureFails(SNR(), {'frameSize': 0})
self.assertConfigureFails(SNR(), {'noiseThreshold': 1})
self.assertConfigureFails(SNR(), {'MMSEAlpha': 2})
self.assertConfigureFails(SNR(), {'MAAlpha': 2})
self.assertConfigureFails(SNR(), {'NoiseAlpha': 2})
def testSinusoidalPlusNoise(self):
from essentia import instantPower
from essentia import db2amp
frameSize = 512
hopSize = frameSize // 2
fs = 44100.
time = 5. # s
time_axis = np.arange(0, time, 1 / fs)
nsamples = len(time_axis)
noise = np.random.randn(nsamples)
noise /= np.std(noise)
noise_only = 1
signal = np.sin(2 * pi * 5000 * time_axis)
signal_db = -22.
noise_db = -50.
signal[:int(noise_only * fs)] = np.zeros(int(noise_only * fs))
snr_gt = 10. * np.log10(
(instantPower(esarr(db2amp(signal_db) * signal[int(noise_only * fs):]))) /
(instantPower(esarr(db2amp(noise_db) * noise[int(noise_only * fs):]))))\
- 10. * np.log10(fs / 2.)
signal_and_noise = esarr(db2amp(signal_db) * signal + db2amp(noise_db) * noise)
noiseThreshold = -30
algo = SNR(frameSize=frameSize, noiseThreshold=noiseThreshold)
for frame in FrameGenerator(signal_and_noise, frameSize=frameSize, hopSize=hopSize):
_, snr, _ = algo(frame)
self.assertAlmostEqual(snr, snr_gt, 1e-1)
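        # Sketch of the ground truth used above (not part of the original
        # test): snr_gt = 10*log10(P_signal/P_noise) - 10*log10(fs/2), i.e.
        # the signal/noise power ratio over the tone-active region, shifted
        # by the Nyquist-bandwidth term that the algorithm's broadband noise
        # correction also applies.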
def testBroadbandNoiseCorrection(self):
from essentia import instantPower
from essentia import db2amp
frameSize = 512
hopSize = frameSize // 2
fs = 44100.
time = 1. # s
time_axis = np.arange(0, time, 1 / fs)
nsamples = len(time_axis)
noise = np.random.randn(nsamples)
noise /= np.std(noise)
noise_only = .2
signal = np.sin(2 * pi * 5000 * time_axis)
signal_db = -22.
noise_db = -50.
signal[:int(noise_only * fs)] = np.zeros(int(noise_only * fs))
signal_and_noise = esarr(db2amp(signal_db) * signal + db2amp(noise_db) * noise)
noiseThreshold = -30
corrected = SNR(frameSize=frameSize, noiseThreshold=noiseThreshold)
notCorrected = SNR(frameSize=frameSize, noiseThreshold=noiseThreshold,
useBroadbadNoiseCorrection=False)
for frame in FrameGenerator(signal_and_noise, frameSize=frameSize, hopSize=hopSize):
_, snrCorrected, _ = corrected(frame)
_, snrNotCorrected, _ = notCorrected(frame)
self.assertAlmostEqual(snrCorrected, snrNotCorrected - 10. * np.log10(fs / 2), 1e-4)
suite = allTests(TestSNR)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| carthach/essentia | test/src/unittests/audioproblems/test_snr.py | Python | agpl-3.0 | 4,003 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-18 12:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0029_auto_20180111_0845'),
]
operations = [
migrations.AlterModelOptions(
name='ordernote',
options={'ordering': ('date',)},
),
migrations.AddField(
model_name='ordernote',
name='is_public',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='ordernote',
name='date',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
]
| UITools/saleor | saleor/order/migrations/0030_auto_20180118_0605.py | Python | bsd-3-clause | 756 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Comments and reviews for records """
__revision__ = "$Id$"
# non Invenio imports:
import time
import math
import os
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
# Invenio imports:
from invenio.dbquery import run_sql
from invenio.config import CFG_PREFIX, \
CFG_SITE_LANG, \
CFG_WEBALERT_ALERT_ENGINE_EMAIL,\
CFG_SITE_SUPPORT_EMAIL,\
CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,\
CFG_SITE_URL,\
CFG_SITE_NAME,\
CFG_WEBCOMMENT_ALLOW_REVIEWS,\
CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS,\
CFG_WEBCOMMENT_ALLOW_COMMENTS,\
CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL,\
CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN,\
CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS,\
CFG_WEBCOMMENT_DEFAULT_MODERATOR
from invenio.webmessage_mailutils import \
email_quote_txt, \
email_quoted_txt2html
from invenio.webuser import get_user_info, get_email, collect_user_info
from invenio.dateutils import convert_datetext_to_dategui, \
datetext_default, \
convert_datestruct_to_datetext
from invenio.mailutils import send_email
from invenio.messages import wash_language, gettext_set_language
from invenio.urlutils import wash_url_argument
from invenio.webcomment_config import CFG_WEBCOMMENT_ACTION_CODE, \
CFG_WEBCOMMENT_EMAIL_REPLIES_TO, \
CFG_WEBCOMMENT_ROUND_DATAFIELD, \
CFG_WEBCOMMENT_RESTRICTION_DATAFIELD, \
CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH
from invenio.access_control_engine import acc_authorize_action
from invenio.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record, \
get_fieldvalues, \
get_collection_reclist, \
get_colID
from invenio.webcomment_washer import EmailWasher
try:
import invenio.template
webcomment_templates = invenio.template.load('webcomment')
except:
pass
def perform_request_display_comments_or_remarks(req, recID, display_order='od', display_since='all', nb_per_page=100, page=1, ln=CFG_SITE_LANG, voted=-1, reported=-1, subscribed=0, reviews=0, uid=-1, can_send_comments=False, can_attach_files=False, user_is_subscribed_to_discussion=False, user_can_unsubscribe_from_discussion=False, display_comment_rounds=None):
"""
Returns all the comments (reviews) of a specific internal record or external basket record.
@param recID: record id where (internal record IDs > 0) or (external basket record IDs < -100)
@param display_order: hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param display_since: all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb_per_page: number of results per page
@param page: results page
@param voted: boolean, active if user voted for a review, see perform_request_vote function
@param reported: boolean, active if user reported a certain comment/review, perform_request_report function
@param subscribed: int, 1 if user just subscribed to discussion, -1 if unsubscribed
@param reviews: boolean, enabled if reviews, disabled for comments
@param uid: the id of the user who is reading comments
@param can_send_comments: if user can send comment or not
@param can_attach_files: if user can attach file to comment or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
    @param user_can_unsubscribe_from_discussion: True if user is allowed to unsubscribe from discussion
    @return: html body.
"""
errors = []
warnings = []
nb_reviews = 0
nb_comments = 0
# wash arguments
recID = wash_url_argument(recID, 'int')
ln = wash_language(ln)
display_order = wash_url_argument(display_order, 'str')
display_since = wash_url_argument(display_since, 'str')
nb_per_page = wash_url_argument(nb_per_page, 'int')
page = wash_url_argument(page, 'int')
voted = wash_url_argument(voted, 'int')
reported = wash_url_argument(reported, 'int')
reviews = wash_url_argument(reviews, 'int')
# vital argument check
(valid, error_body) = check_recID_is_in_range(recID, warnings, ln)
if not(valid):
return (error_body, errors, warnings)
# CERN hack begins: filter out ATLAS comments
from invenio.config import CFG_CERN_SITE
if CFG_CERN_SITE:
restricted_comments_p = False
for report_number in get_fieldvalues(recID, '088__a'):
if report_number.startswith("ATL-"):
restricted_comments_p = True
break
if restricted_comments_p:
err_code, err_msg = acc_authorize_action(uid, 'viewrestrcoll',
collection='ATLAS Communications')
if err_code:
return (err_msg, errors, warnings)
# CERN hack ends
# Query the database and filter results
user_info = collect_user_info(uid)
res = query_retrieve_comments_or_remarks(recID, display_order, display_since, reviews, user_info=user_info)
res2 = query_retrieve_comments_or_remarks(recID, display_order, display_since, not reviews, user_info=user_info)
nb_res = len(res)
if reviews:
nb_reviews = nb_res
nb_comments = len(res2)
else:
nb_reviews = len(res2)
nb_comments = nb_res
    # checking non-vital arguments - will be set to default if wrong
#if page <= 0 or page.lower() != 'all':
if page < 0:
page = 1
warnings.append(('WRN_WEBCOMMENT_INVALID_PAGE_NB',))
if nb_per_page < 0:
nb_per_page = 100
warnings.append(('WRN_WEBCOMMENT_INVALID_NB_RESULTS_PER_PAGE',))
if CFG_WEBCOMMENT_ALLOW_REVIEWS and reviews:
if display_order not in ['od', 'nd', 'hh', 'lh', 'hs', 'ls']:
display_order = 'hh'
warnings.append(('WRN_WEBCOMMENT_INVALID_REVIEW_DISPLAY_ORDER',))
else:
if display_order not in ['od', 'nd']:
display_order = 'od'
warnings.append(('WRN_WEBCOMMENT_INVALID_DISPLAY_ORDER',))
if not display_comment_rounds:
display_comment_rounds = []
    # filter results according to page and number of results per page
if nb_per_page > 0:
if nb_res > 0:
last_page = int(math.ceil(nb_res / float(nb_per_page)))
else:
last_page = 1
if page > last_page:
page = 1
warnings.append(("WRN_WEBCOMMENT_INVALID_PAGE_NB",))
if nb_res > nb_per_page: # if more than one page of results
if page < last_page:
res = res[(page-1)*(nb_per_page) : (page*nb_per_page)]
else:
res = res[(page-1)*(nb_per_page) : ]
else: # one page of results
pass
else:
last_page = 1
# Send to template
avg_score = 0.0
if not CFG_WEBCOMMENT_ALLOW_COMMENTS and not CFG_WEBCOMMENT_ALLOW_REVIEWS: # comments not allowed by admin
errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
if reported > 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED',))
elif reported == 0:
warnings.append(('WRN_WEBCOMMENT_ALREADY_REPORTED',))
elif reported == -2:
warnings.append(('WRN_WEBCOMMENT_INVALID_REPORT',))
if CFG_WEBCOMMENT_ALLOW_REVIEWS and reviews:
avg_score = calculate_avg_score(res)
if voted > 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED',))
elif voted == 0:
warnings.append(('WRN_WEBCOMMENT_ALREADY_VOTED',))
if subscribed == 1:
warnings.append(('WRN_WEBCOMMENT_SUBSCRIBED',))
elif subscribed == -1:
warnings.append(('WRN_WEBCOMMENT_UNSUBSCRIBED',))
grouped_comments = group_comments_by_round(res, reviews)
# Clean list of comments round names
if not display_comment_rounds:
display_comment_rounds = []
elif 'all' in display_comment_rounds:
display_comment_rounds = [cmtgrp[0] for cmtgrp in grouped_comments]
elif 'latest' in display_comment_rounds:
if grouped_comments:
display_comment_rounds.append(grouped_comments[-1][0])
display_comment_rounds.remove('latest')
body = webcomment_templates.tmpl_get_comments(req,
recID,
ln,
nb_per_page, page, last_page,
display_order, display_since,
CFG_WEBCOMMENT_ALLOW_REVIEWS,
grouped_comments, nb_comments, avg_score,
warnings,
border=0,
reviews=reviews,
total_nb_reviews=nb_reviews,
uid=uid,
can_send_comments=can_send_comments,
can_attach_files=can_attach_files,
user_is_subscribed_to_discussion=\
user_is_subscribed_to_discussion,
user_can_unsubscribe_from_discussion=\
user_can_unsubscribe_from_discussion,
display_comment_rounds=display_comment_rounds)
return (body, errors, warnings)
def perform_request_vote(cmt_id, client_ip_address, value, uid=-1):
"""
Vote positively or negatively for a comment/review
@param cmt_id: review id
@param value: +1 for voting positively
-1 for voting negatively
@return: integer 1 if successful, integer 0 if not
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
value = wash_url_argument(value, 'int')
uid = wash_url_argument(uid, 'int')
if cmt_id > 0 and value in [-1, 1] and check_user_can_vote(cmt_id, client_ip_address, uid):
action_date = convert_datestruct_to_datetext(time.localtime())
action_code = CFG_WEBCOMMENT_ACTION_CODE['VOTE']
query = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT,
id_bibrec, id_user, client_host, action_time,
action_code)
VALUES (%s, NULL ,%s, inet_aton(%s), %s, %s)"""
params = (cmt_id, uid, client_ip_address, action_date, action_code)
run_sql(query, params)
return query_record_useful_review(cmt_id, value)
else:
return 0
def check_user_can_comment(recID, client_ip_address, uid=-1):
""" Check if a user hasn't already commented within the last seconds
time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS
@param recID: record id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
recID = wash_url_argument(recID, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
max_action_time = time.time() - CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS
max_action_time = convert_datestruct_to_datetext(time.localtime(max_action_time))
action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_COMMENT']
query = """SELECT id_bibrec
FROM cmtACTIONHISTORY
WHERE id_bibrec=%s AND
action_code=%s AND
action_time>%s
"""
params = (recID, action_code, max_action_time)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return len(res) == 0
def check_user_can_review(recID, client_ip_address, uid=-1):
""" Check if a user hasn't already reviewed within the last seconds
time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS
@param recID: record ID
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_REVIEW']
query = """SELECT id_bibrec
FROM cmtACTIONHISTORY
WHERE id_bibrec=%s AND
action_code=%s
"""
params = (recID, action_code)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return len(res) == 0
def check_user_can_vote(cmt_id, client_ip_address, uid=-1):
""" Checks if a user hasn't already voted
@param cmt_id: comment id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
query = """SELECT id_cmtRECORDCOMMENT
FROM cmtACTIONHISTORY
WHERE id_cmtRECORDCOMMENT=%s"""
params = (cmt_id,)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid, )
res = run_sql(query, params)
return (len(res) == 0)
def get_comment_collection(cmt_id):
"""
Extract the collection where the comment is written
"""
query = "SELECT id_bibrec FROM cmtRECORDCOMMENT WHERE id=%s"
recid = run_sql(query, (cmt_id,))
record_primary_collection = guess_primary_collection_of_a_record(recid[0][0])
return record_primary_collection
def get_collection_moderators(collection):
"""
Return the list of comment moderators for the given collection.
"""
from invenio.access_control_engine import acc_get_authorized_emails
res = list(acc_get_authorized_emails('moderatecomments', collection=collection))
if not res:
return [CFG_WEBCOMMENT_DEFAULT_MODERATOR,]
return res
def perform_request_report(cmt_id, client_ip_address, uid=-1):
"""
Report a comment/review for inappropriate content.
Will send an email to the administrator if number of reports is a multiple of CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN
@param cmt_id: comment id
@return: integer 1 if successful, integer 0 if not. -2 if comment does not exist
"""
cmt_id = wash_url_argument(cmt_id, 'int')
if cmt_id <= 0:
return 0
(query_res, nb_abuse_reports) = query_record_report_this(cmt_id)
if query_res == 0:
return 0
elif query_res == -2:
return -2
if not(check_user_can_report(cmt_id, client_ip_address, uid)):
return 0
action_date = convert_datestruct_to_datetext(time.localtime())
action_code = CFG_WEBCOMMENT_ACTION_CODE['REPORT_ABUSE']
query = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT, id_bibrec,
id_user, client_host, action_time, action_code)
VALUES (%s, NULL, %s, inet_aton(%s), %s, %s)"""
params = (cmt_id, uid, client_ip_address, action_date, action_code)
run_sql(query, params)
if nb_abuse_reports % CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN == 0:
(cmt_id2,
id_bibrec,
id_user,
cmt_body,
cmt_date,
cmt_star,
cmt_vote, cmt_nb_votes_total,
cmt_title,
cmt_reported,
round_name,
restriction) = query_get_comment(cmt_id)
(user_nb_abuse_reports,
user_votes,
user_nb_votes_total) = query_get_user_reports_and_votes(int(id_user))
(nickname, user_email, last_login) = query_get_user_contact_info(id_user)
from_addr = '%s Alert Engine <%s>' % (CFG_SITE_NAME, CFG_WEBALERT_ALERT_ENGINE_EMAIL)
comment_collection = get_comment_collection(cmt_id)
to_addrs = get_collection_moderators(comment_collection)
subject = "A comment has been reported as inappropriate by a user"
body = '''
The following comment has been reported a total of %(cmt_reported)s times.
Author: nickname = %(nickname)s
email = %(user_email)s
user_id = %(uid)s
This user has:
total number of reports = %(user_nb_abuse_reports)s
%(votes)s
Comment: comment_id = %(cmt_id)s
record_id = %(id_bibrec)s
date written = %(cmt_date)s
nb reports = %(cmt_reported)s
%(review_stuff)s
body =
---start body---
%(cmt_body)s
---end body---
Please go to the record page %(comment_admin_link)s to delete this message if necessary. A warning will be sent to the user in question.''' % \
{ 'cfg-report_max' : CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN,
'nickname' : nickname,
'user_email' : user_email,
'uid' : id_user,
'user_nb_abuse_reports' : user_nb_abuse_reports,
'user_votes' : user_votes,
'votes' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
"total number of positive votes\t= %s\n\t\ttotal number of negative votes\t= %s" % \
(user_votes, (user_nb_votes_total - user_votes)) or "\n",
'cmt_id' : cmt_id,
'id_bibrec' : id_bibrec,
'cmt_date' : cmt_date,
'cmt_reported' : cmt_reported,
'review_stuff' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
"star score\t= %s\n\treview title\t= %s" % (cmt_star, cmt_title) or "",
'cmt_body' : cmt_body,
'comment_admin_link' : CFG_SITE_URL + "/record/" + str(id_bibrec) + '/comments#' + str(cmt_id),
'user_admin_link' : "user_admin_link" #! FIXME
}
#FIXME to be added to email when websession module is over:
#If you wish to ban the user, you can do so via the User Admin Panel %(user_admin_link)s.
send_email(from_addr, to_addrs, subject, body)
return 1
def check_user_can_report(cmt_id, client_ip_address, uid=-1):
""" Checks if a user hasn't already reported a comment
@param cmt_id: comment id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
query = """SELECT id_cmtRECORDCOMMENT
FROM cmtACTIONHISTORY
WHERE id_cmtRECORDCOMMENT=%s"""
    params = (cmt_id,)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return (len(res) == 0)
def query_get_user_contact_info(uid):
"""
Get the user contact information
@return: tuple (nickname, email, last_login), if none found return ()
Note: for the moment, if no nickname, will return email address up to the '@'
"""
query1 = """SELECT nickname, email,
DATE_FORMAT(last_login, '%%Y-%%m-%%d %%H:%%i:%%s')
FROM user WHERE id=%s"""
params1 = (uid,)
res1 = run_sql(query1, params1)
if res1:
return res1[0]
else:
return ()
def query_get_user_reports_and_votes(uid):
"""
Retrieve total number of reports and votes of a particular user
@param uid: user id
@return: tuple (total_nb_reports, total_nb_votes_yes, total_nb_votes_total)
if none found return ()
"""
query1 = """SELECT nb_votes_yes,
nb_votes_total,
nb_abuse_reports
FROM cmtRECORDCOMMENT
WHERE id_user=%s"""
params1 = (uid,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return ()
nb_votes_yes = nb_votes_total = nb_abuse_reports = 0
for cmt_tuple in res1:
nb_votes_yes += int(cmt_tuple[0])
nb_votes_total += int(cmt_tuple[1])
nb_abuse_reports += int(cmt_tuple[2])
return (nb_abuse_reports, nb_votes_yes, nb_votes_total)
def query_get_comment(comID):
"""
Get all fields of a comment
@param comID: comment id
@return: tuple (comID, id_bibrec, id_user, body, date_creation, star_score, nb_votes_yes, nb_votes_total, title, nb_abuse_reports, round_name, restriction)
if none found return ()
"""
query1 = """SELECT id,
id_bibrec,
id_user,
body,
DATE_FORMAT(date_creation, '%%Y-%%m-%%d %%H:%%i:%%s'),
star_score,
nb_votes_yes,
nb_votes_total,
title,
nb_abuse_reports,
round_name,
restriction
FROM cmtRECORDCOMMENT
WHERE id=%s"""
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1)>0:
return res1[0]
else:
return ()
def query_record_report_this(comID):
"""
Increment the number of reports for a comment
@param comID: comment id
@return: tuple (success, new_total_nb_reports_for_this_comment) where
success is integer 1 if success, integer 0 if not, -2 if comment does not exist
"""
#retrieve nb_abuse_reports
query1 = "SELECT nb_abuse_reports FROM cmtRECORDCOMMENT WHERE id=%s"
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return (-2, 0)
#increment and update
nb_abuse_reports = int(res1[0][0]) + 1
query2 = "UPDATE cmtRECORDCOMMENT SET nb_abuse_reports=%s WHERE id=%s"
params2 = (nb_abuse_reports, comID)
res2 = run_sql(query2, params2)
return (int(res2), nb_abuse_reports)
def query_record_useful_review(comID, value):
"""
    Private function
Adjust the number of useful votes and number of total votes for a comment.
@param comID: comment id
@param value: +1 or -1
@return: integer 1 if successful, integer 0 if not
"""
# retrieve nb_useful votes
query1 = "SELECT nb_votes_total, nb_votes_yes FROM cmtRECORDCOMMENT WHERE id=%s"
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1)==0:
return 0
# modify and insert new nb_useful votes
nb_votes_yes = int(res1[0][1])
if value >= 1:
nb_votes_yes = int(res1[0][1]) + 1
nb_votes_total = int(res1[0][0]) + 1
query2 = "UPDATE cmtRECORDCOMMENT SET nb_votes_total=%s, nb_votes_yes=%s WHERE id=%s"
params2 = (nb_votes_total, nb_votes_yes, comID)
res2 = run_sql(query2, params2)
return int(res2)
def query_retrieve_comments_or_remarks(recID, display_order='od', display_since='0000-00-00 00:00:00',
ranking=0, limit='all', user_info=None):
"""
Private function
Retrieve tuple of comments or remarks from the database
@param recID: record id
@param display_order: hh = highest helpful score
lh = lowest helpful score
hs = highest star score
ls = lowest star score
od = oldest date
nd = newest date
@param display_since: datetime, e.g. 0000-00-00 00:00:00
@param ranking: boolean, enabled if reviews, disabled for comments
@param limit: number of comments/review to return
@return: tuple of comment where comment is
tuple (nickname, uid, date_creation, body, status, id) if ranking disabled or
tuple (nickname, uid, date_creation, body, status, nb_votes_yes, nb_votes_total, star_score, title, id)
Note: for the moment, if no nickname, will return email address up to '@'
"""
display_since = calculate_start_date(display_since)
order_dict = { 'hh' : "cmt.nb_votes_yes/(cmt.nb_votes_total+1) DESC, cmt.date_creation DESC ",
'lh' : "cmt.nb_votes_yes/(cmt.nb_votes_total+1) ASC, cmt.date_creation ASC ",
'ls' : "cmt.star_score ASC, cmt.date_creation DESC ",
'hs' : "cmt.star_score DESC, cmt.date_creation DESC ",
'nd' : "cmt.reply_order_cached_data DESC ",
'od' : "cmt.reply_order_cached_data ASC "
}
    # Ranking only done for reviews of internal records (recID > 0)
if ranking and recID > 0:
try:
display_order = order_dict[display_order]
except:
display_order = order_dict['od']
else:
        # in case of recID < 0 => external record => no ranking!
ranking = 0
try:
if display_order[-1] == 'd':
display_order = order_dict[display_order]
else:
display_order = order_dict['od']
except:
display_order = order_dict['od']
#display_order = order_dict['nd']
query = """SELECT user.nickname,
cmt.id_user,
DATE_FORMAT(cmt.date_creation, '%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s'),
cmt.body,
cmt.status,
cmt.nb_abuse_reports,
%(ranking)s cmt.id,
cmt.round_name,
cmt.restriction,
%(reply_to_column)s
FROM cmtRECORDCOMMENT cmt LEFT JOIN user ON
user.id=cmt.id_user
WHERE cmt.id_bibrec=%%s
%(ranking_only)s
%(display_since)s
ORDER BY %(display_order)s
""" % {'ranking' : ranking and ' cmt.nb_votes_yes, cmt.nb_votes_total, cmt.star_score, cmt.title, ' or '',
'ranking_only' : ranking and ' AND cmt.star_score>0 ' or ' AND cmt.star_score=0 ',
# 'id_bibrec' : recID > 0 and 'cmt.id_bibrec' or 'cmt.id_bibrec_or_bskEXTREC',
# 'table' : recID > 0 and 'cmtRECORDCOMMENT' or 'bskRECORDCOMMENT',
'display_since' : display_since == '0000-00-00 00:00:00' and ' ' or 'AND cmt.date_creation>=\'%s\' ' % display_since,
'display_order': display_order,
'reply_to_column': recID > 0 and 'cmt.in_reply_to_id_cmtRECORDCOMMENT' or 'cmt.in_reply_to_id_bskRECORDCOMMENT'}
params = (recID,)
res = run_sql(query, params)
# return res
    # 'limit' may be the string 'all', a numeric string or an integer
    try:
        new_limit = int(limit)
    except (TypeError, ValueError):
        new_limit = None
    comments_list = []
    for row in res:
        if ranking:
            # when dealing with reviews, row[12] holds restriction info:
            restriction = row[12]
        else:
            # when dealing with comments, row[8] holds restriction info:
            restriction = row[8]
        if user_info and check_user_can_view_comment(user_info, None, restriction)[0] != 0:
            # User cannot view comment. Look further
            continue
        comments_list.append(row)
        if new_limit is not None:
            new_limit -= 1
            if new_limit < 1:
                break
    if comments_list:
        return comments_list
    return ()
## def get_comment_children(comID):
## """
## Returns the list of children (i.e. direct descendants) ordered by time of addition.
## @param comID: the ID of the comment for which we want to retrieve children
## @type comID: int
## @return the list of children
## @rtype: list
## """
## res = run_sql("SELECT id FROM cmtRECORDCOMMENT WHERE in_reply_to_id_cmtRECORDCOMMENT=%s", (comID,))
## return [row[0] for row in res]
## def get_comment_descendants(comID, depth=None):
## """
## Returns the list of descendants of the given comment, ordered from
## oldest to newest ("top-down"), down to depth specified as parameter.
## @param comID: the ID of the comment for which we want to retrieve descendant
## @type comID: int
## @param depth: the max depth down to which we want to retrieve
## descendants. Specify None for no limit, 1 for direct
## children only, etc.
## @return the list of ancestors
## @rtype: list(tuple(comment ID, descendants comments IDs))
## """
## if depth == 0:
## return (comID, [])
## res = run_sql("SELECT id FROM cmtRECORDCOMMENT WHERE in_reply_to_id_cmtRECORDCOMMENT=%s", (comID,))
## if res:
## children_comID = [row[0] for row in res]
## children_descendants = []
## if depth:
## depth -= 1
## children_descendants = [get_comment_descendants(child_comID, depth) for child_comID in children_comID]
## return (comID, children_descendants)
## else:
## return (comID, [])
def get_comment_ancestors(comID, depth=None):
"""
Returns the list of ancestors of the given comment, ordered from
oldest to newest ("top-down": direct parent of comID is at last position),
up to given depth
@param comID: the ID of the comment for which we want to retrieve ancestors
@type comID: int
    @param depth: the maximum number of levels up from the given
                  comment we want to retrieve ancestors. None for no
                  limit, 1 for direct parent only, etc.
@type depth: int
@return the list of ancestors
@rtype: list
"""
if depth == 0:
return []
res = run_sql("SELECT in_reply_to_id_cmtRECORDCOMMENT FROM cmtRECORDCOMMENT WHERE id=%s", (comID,))
if res:
parent_comID = res[0][0]
if parent_comID == 0:
return []
parent_ancestors = []
if depth:
depth -= 1
parent_ancestors = get_comment_ancestors(parent_comID, depth)
parent_ancestors.append(parent_comID)
return parent_ancestors
else:
return []
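# Illustrative sketch (not part of the original module): for a hypothetical
# thread where comment 9 replies to comment 5 and comment 5 replies to
# comment 1:
#   get_comment_ancestors(9)          -> [1, 5]
#   get_comment_ancestors(9, depth=1) -> [5]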
def get_reply_order_cache_data(comid):
"""
Prepare a representation of the comment ID given as parameter so
that it is suitable for byte ordering in MySQL.
"""
return "%s%s%s%s" % (chr((comid >> 24) % 256), chr((comid >> 16) % 256),
chr((comid >> 8) % 256), chr(comid % 256))
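# Illustrative sketch (not part of the original module): the four big-endian
# bytes make lexicographic comparison of reply_order_cached_data agree with
# numeric comparison of comment IDs, so replies sort in insertion order:
#   get_reply_order_cache_data(1)   == '\x00\x00\x00\x01'
#   get_reply_order_cache_data(256) == '\x00\x00\x01\x00'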
def query_add_comment_or_remark(reviews=0, recID=0, uid=-1, msg="",
note="", score=0, priority=0,
client_ip_address='', editor_type='textarea',
req=None, reply_to=None, attached_files=None):
"""
Private function
    Insert a comment/review or remark into the database
@param recID: record id
@param uid: user id
@param msg: comment body
@param note: comment title
@param score: review star score
@param priority: remark priority #!FIXME
@param editor_type: the kind of editor used to submit the comment: 'textarea', 'fckeditor'
@param req: request object. If provided, email notification are sent after we reply to user request.
@param reply_to: the id of the comment we are replying to with this inserted comment.
@return: integer >0 representing id if successful, integer 0 if not
"""
current_date = calculate_start_date('0d')
#change utf-8 message into general unicode
msg = msg.decode('utf-8')
note = note.decode('utf-8')
#change general unicode back to utf-8
msg = msg.encode('utf-8')
note = note.encode('utf-8')
(restriction, round_name) = get_record_status(recID)
if attached_files is None:
attached_files = {}
if reply_to and CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH >= 0:
# Check that we have not reached max depth
comment_ancestors = get_comment_ancestors(reply_to)
if len(comment_ancestors) >= CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH:
if CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH == 0:
reply_to = None
else:
reply_to = comment_ancestors[CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH - 1]
# Inherit restriction and group/round of 'parent'
comment = query_get_comment(reply_to)
if comment:
(round_name, restriction) = comment[10:12]
if editor_type == 'fckeditor':
# Here we remove the line feeds introduced by FCKeditor (they
# have no meaning for the user) and replace the HTML line
# breaks by linefeeds, so that we are close to an input that
# would be done without the FCKeditor. That's much better if a
# reply to a comment is made with a browser that does not
# support FCKeditor.
msg = msg.replace('\n', '').replace('\r', '').replace('<br />', '\n')
query = """INSERT INTO cmtRECORDCOMMENT (id_bibrec,
id_user,
body,
date_creation,
star_score,
nb_votes_total,
title,
round_name,
restriction,
in_reply_to_id_cmtRECORDCOMMENT)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
params = (recID, uid, msg, current_date, score, 0, note, round_name, restriction, reply_to or 0)
res = run_sql(query, params)
if res:
new_comid = int(res)
move_attached_files_to_storage(attached_files, recID, new_comid)
parent_reply_order = run_sql("""SELECT reply_order_cached_data from cmtRECORDCOMMENT where id=%s""", (reply_to,))
if not parent_reply_order or parent_reply_order[0][0] is None:
# This is not a reply, but a first 0-level comment
parent_reply_order = ''
else:
parent_reply_order = parent_reply_order[0][0]
run_sql("""UPDATE cmtRECORDCOMMENT SET reply_order_cached_data=%s WHERE id=%s""",
(parent_reply_order + get_reply_order_cache_data(new_comid), new_comid))
action_code = CFG_WEBCOMMENT_ACTION_CODE[reviews and 'ADD_REVIEW' or 'ADD_COMMENT']
action_time = convert_datestruct_to_datetext(time.localtime())
query2 = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT,
id_bibrec, id_user, client_host, action_time, action_code)
VALUES (%s, %s, %s, inet_aton(%s), %s, %s)"""
params2 = (res, recID, uid, client_ip_address, action_time, action_code)
run_sql(query2, params2)
def notify_subscribers_callback(data):
"""
Define a callback that retrieves subscribed users, and
notify them by email.
@param data: contains the necessary parameters in a tuple:
(recid, uid, comid, msg, note, score, editor_type, reviews)
"""
recid, uid, comid, msg, note, score, editor_type, reviews = data
# Email this comment to 'subscribers'
(subscribers_emails1, subscribers_emails2) = \
get_users_subscribed_to_discussion(recid)
email_subscribers_about_new_comment(recid, reviews=reviews,
emails1=subscribers_emails1,
emails2=subscribers_emails2,
comID=comid, msg=msg,
note=note, score=score,
editor_type=editor_type, uid=uid)
# Register our callback to notify subscribed people after
# having replied to our current user.
data = (recID, uid, res, msg, note, score, editor_type, reviews)
if req:
req.register_cleanup(notify_subscribers_callback, data)
else:
notify_subscribers_callback(data)
return int(res)
def move_attached_files_to_storage(attached_files, recID, comid):
"""
Move the files that were just attached to a new comment to their
final location.
@param attached_files: the mappings of desired filename to attach
and path where to find the original file
@type attached_files: dict {filename, filepath}
@param recID: the record ID to which we attach the files
@param comid: the comment ID to which we attach the files
"""
for filename, filepath in attached_files.iteritems():
os.renames(filepath,
os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recID), str(comid), filename))
def get_attached_files(recid, comid):
"""
Returns a list with tuples (filename, filepath, fileurl)
@param recid: the recid to which the comment belong
@param comid: the commment id for which we want to retrieve files
"""
base_dir = os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recid), str(comid))
if os.path.isdir(base_dir):
filenames = os.listdir(base_dir)
return [(filename, os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recid), str(comid), filename),
CFG_SITE_URL + '/record/' + str(recid) + '/comments/attachments/get/' + str(comid) + '/' + filename) \
for filename in filenames]
else:
return []
def subscribe_user_to_discussion(recID, uid):
"""
    Subscribe a user to a discussion, so that she receives by email
    all new comments for this record.
@param recID: record ID corresponding to the discussion we want to
subscribe the user
@param uid: user id
"""
query = """INSERT INTO cmtSUBSCRIPTION (id_bibrec, id_user, creation_time)
VALUES (%s, %s, %s)"""
params = (recID, uid, convert_datestruct_to_datetext(time.localtime()))
try:
run_sql(query, params)
except:
return 0
return 1
def unsubscribe_user_from_discussion(recID, uid):
"""
Unsubscribe users from a discussion.
@param recID: record ID corresponding to the discussion we want to
unsubscribe the user
@param uid: user id
@return 1 if successful, 0 if not
"""
query = """DELETE FROM cmtSUBSCRIPTION
WHERE id_bibrec=%s AND id_user=%s"""
params = (recID, uid)
try:
res = run_sql(query, params)
except:
return 0
if res > 0:
return 1
return 0
def get_user_subscription_to_discussion(recID, uid):
"""
Returns the type of subscription for the given user to this
    discussion. This does not check authorizations (e.g. if a user
    was subscribed, but is suddenly no longer authorized).
@param recID: record ID
@param uid: user id
@return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe
"""
user_email = get_email(uid)
(emails1, emails2) = get_users_subscribed_to_discussion(recID, check_authorizations=False)
if user_email in emails1:
return 1
elif user_email in emails2:
return 2
else:
return 0
def get_users_subscribed_to_discussion(recID, check_authorizations=True):
"""
Returns the lists of users subscribed to a given discussion.
Two lists are returned: the first one is the list of emails for
users who can unsubscribe from the discussion, the second list
    contains the emails of users who cannot unsubscribe (e.g. the
    author of the document).
Users appear in only one list. If a user has manually subscribed
    to a discussion AND is an automatic recipient for updates, they
    will only appear in the second list.
@param recID: record ID for which we want to retrieve subscribed users
@param check_authorizations: if True, check again if users are authorized to view comment
@return tuple (emails1, emails2)
"""
subscribers_emails = {}
# Get users that have subscribed to this discussion
query = """SELECT id_user FROM cmtSUBSCRIPTION WHERE id_bibrec=%s"""
params = (recID,)
res = run_sql(query, params)
for row in res:
uid = row[0]
if check_authorizations:
user_info = collect_user_info(uid)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, recID)
else:
            # Don't check; just grant access
auth_code = False
if auth_code:
# User is no longer authorized to view comments.
# Delete subscription
unsubscribe_user_from_discussion(recID, uid)
else:
email = get_email(uid)
if '@' in email:
subscribers_emails[email] = True
# Get users automatically subscribed, based on the record metadata
collections_with_auto_replies = CFG_WEBCOMMENT_EMAIL_REPLIES_TO.keys()
for collection in collections_with_auto_replies:
if (get_colID(collection) is not None) and \
(recID in get_collection_reclist(collection)):
fields = CFG_WEBCOMMENT_EMAIL_REPLIES_TO[collection]
for field in fields:
emails = get_fieldvalues(recID, field)
for email in emails:
                    if '@' not in email:
# Is a group: add domain name
subscribers_emails[email + '@' + \
CFG_SITE_SUPPORT_EMAIL.split('@')[1]] = False
else:
subscribers_emails[email] = False
return ([email for email, can_unsubscribe_p \
in subscribers_emails.iteritems() if can_unsubscribe_p],
[email for email, can_unsubscribe_p \
in subscribers_emails.iteritems() if not can_unsubscribe_p] )
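# Illustrative note (not part of the original module): the two returned lists
# are disjoint. A user who subscribed manually AND is an automatic recipient
# ends up only in the second ("cannot unsubscribe") list, because the metadata
# pass overwrites the can_unsubscribe flag in subscribers_emails.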
def email_subscribers_about_new_comment(recID, reviews, emails1,
emails2, comID, msg="",
note="", score=0,
editor_type='textarea',
ln=CFG_SITE_LANG, uid=-1):
"""
Notify subscribers that a new comment was posted.
FIXME: consider recipient preference to send email in correct language.
@param recID: record id
@param emails1: list of emails for users who can unsubscribe from discussion
@param emails2: list of emails for users who cannot unsubscribe from discussion
@param comID: the comment id
@param msg: comment body
@param note: comment title
@param score: review star score
@param editor_type: the kind of editor used to submit the comment: 'textarea', 'fckeditor'
@rtype: bool
@return: True if email was sent okay, False if it was not.
"""
_ = gettext_set_language(ln)
if not emails1 and not emails2:
return 0
# Get title
titles = get_fieldvalues(recID, "245__a")
if not titles:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
title = ''
if titles:
title = titles[0]
else:
title = _("Record %i") % recID
# Get report number
report_numbers = get_fieldvalues(recID, "037__a")
if not report_numbers:
report_numbers = get_fieldvalues(recID, "088__a")
if not report_numbers:
report_numbers = get_fieldvalues(recID, "021__a")
# Prepare email subject and body
if reviews:
email_subject = _('%(report_number)s"%(title)s" has been reviewed') % \
{'report_number': report_numbers and ('[' + report_numbers[0] + '] ') or '',
'title': title}
else:
email_subject = _('%(report_number)s"%(title)s" has been commented') % \
{'report_number': report_numbers and ('[' + report_numbers[0] + '] ') or '',
'title': title}
washer = EmailWasher()
msg = washer.wash(msg)
email_content = msg
if note:
email_content = note + email_content
# Send emails to people who can unsubscribe
email_header = webcomment_templates.tmpl_email_new_comment_header(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=True,
ln=ln,
uid=uid)
email_footer = webcomment_templates.tmpl_email_new_comment_footer(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=True,
ln=ln)
res1 = True
if emails1:
res1 = send_email(fromaddr=CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,
toaddr=emails1,
subject=email_subject,
content=email_content,
header=email_header,
footer=email_footer,
ln=ln)
# Then send email to people who have been automatically
# subscribed to the discussion (they cannot unsubscribe)
email_header = webcomment_templates.tmpl_email_new_comment_header(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=False,
ln=ln,
uid=uid)
email_footer = webcomment_templates.tmpl_email_new_comment_footer(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=False,
ln=ln)
res2 = True
if emails2:
res2 = send_email(fromaddr=CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,
toaddr=emails2,
subject=email_subject,
content=email_content,
header=email_header,
footer=email_footer,
ln=ln)
return res1 and res2
def get_record_status(recid):
"""
Returns the current status of the record, i.e. current restriction to apply for newly submitted
comments, and current commenting round.
The restriction to apply can be found in the record metadata, in
field(s) defined by config CFG_WEBCOMMENT_RESTRICTION_DATAFIELD. The restriction is empty string ""
    in cases where the restriction has not explicitly been set, even
if the record itself is restricted.
@param recid: the record id
@type recid: int
@return tuple(restriction, round_name), where 'restriction' is empty string when no restriction applies
    @rtype (string, string)
"""
collections_with_rounds = CFG_WEBCOMMENT_ROUND_DATAFIELD.keys()
commenting_round = ""
for collection in collections_with_rounds:
        # Find the first collection that defines a rounds field for
        # this record
if get_colID(collection) is not None and \
(recid in get_collection_reclist(collection)):
commenting_rounds = get_fieldvalues(recid, CFG_WEBCOMMENT_ROUND_DATAFIELD.get(collection, ""))
if commenting_rounds:
commenting_round = commenting_rounds[0]
break
collections_with_restrictions = CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.keys()
restriction = ""
for collection in collections_with_restrictions:
# Find the first collection that defines restriction field for
# this record
if get_colID(collection) is not None and \
recid in get_collection_reclist(collection):
restrictions = get_fieldvalues(recid, CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.get(collection, ""))
if restrictions:
restriction = restrictions[0]
break
return (restriction, commenting_round)
def calculate_start_date(display_since):
"""
Private function
Returns the datetime of display_since argument in MYSQL datetime format
calculated according to the local time.
    @param display_since: all = no filtering
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit number
@return: string of wanted datetime.
If 'all' given as argument, will return datetext_default
datetext_default is defined in miscutils/lib/dateutils and
equals 0000-00-00 00:00:00 => MySQL format
    If bad argument given, will return datetext_default
"""
time_types = {'d':0, 'w':0, 'm':0, 'y':0}
today = datetime.today()
try:
nb = int(display_since[:-1])
except:
return datetext_default
if display_since in [None, 'all']:
return datetext_default
if str(display_since[-1]) in time_types:
time_type = str(display_since[-1])
else:
return datetext_default
# year
if time_type == 'y':
if (int(display_since[:-1]) > today.year - 1) or (int(display_since[:-1]) < 1):
            # nb must be between 1 and today.year - 1
return datetext_default
else:
final_nb_year = today.year - nb
yesterday = today.replace(year=final_nb_year)
# month
elif time_type == 'm':
# obtain only the date: yyyy-mm-dd
date_today = datetime.now().date()
final_date = date_today - relativedelta(months=nb)
yesterday = today.replace(year=final_date.year, month=final_date.month, day=final_date.day)
# week
elif time_type == 'w':
delta = timedelta(weeks=nb)
yesterday = today - delta
# day
elif time_type == 'd':
delta = timedelta(days=nb)
yesterday = today - delta
return yesterday.strftime("%Y-%m-%d %H:%M:%S")
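# Illustrative sketch (not part of the original module), assuming "now" is
# 2011-06-15 12:00:00:
#   calculate_start_date('2w')  -> '2011-06-01 12:00:00'
#   calculate_start_date('1m')  -> '2011-05-15 12:00:00'
#   calculate_start_date('all') -> datetext_default ('0000-00-00 00:00:00')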
def count_comments(recID):
"""
Returns the number of comments made on a record.
"""
recID = int(recID)
query = """SELECT count(id) FROM cmtRECORDCOMMENT
WHERE id_bibrec=%s AND star_score=0"""
return run_sql(query, (recID,))[0][0]
def count_reviews(recID):
"""
Returns the number of reviews made on a record.
"""
recID = int(recID)
query = """SELECT count(id) FROM cmtRECORDCOMMENT
WHERE id_bibrec=%s AND star_score>0"""
return run_sql(query, (recID,))[0][0]
def get_first_comments_or_remarks(recID=-1,
ln=CFG_SITE_LANG,
nb_comments='all',
nb_reviews='all',
voted=-1,
reported=-1,
user_info=None):
"""
    Gets the first nb comments/reviews or remarks.
In the case of comments, will get both comments and reviews
Comments and remarks sorted by most recent date, reviews sorted by highest helpful score
@param recID: record id
@param ln: language
@param nb_comments: number of comment or remarks to get
@param nb_reviews: number of reviews or remarks to get
@param voted: 1 if user has voted for a remark
@param reported: 1 if user has reported a comment or review
@return: if comment, tuple (comments, reviews) both being html of first nb comments/reviews
             if remark, tuple (remarks, None)
"""
warnings = []
errors = []
voted = wash_url_argument(voted, 'int')
reported = wash_url_argument(reported, 'int')
## check recID argument
if type(recID) is not int:
return ()
if recID >= 1: #comment or review. NB: suppressed reference to basket (handled in webbasket)
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
res_reviews = query_retrieve_comments_or_remarks(recID=recID, display_order="hh", ranking=1,
limit=nb_comments, user_info=user_info)
nb_res_reviews = len(res_reviews)
## check nb argument
if type(nb_reviews) is int and nb_reviews < len(res_reviews):
first_res_reviews = res_reviews[:nb_reviews]
else:
first_res_reviews = res_reviews
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
            res_comments = query_retrieve_comments_or_remarks(recID=recID, display_order="od", ranking=0,
                                                              limit=nb_comments, user_info=user_info)
nb_res_comments = len(res_comments)
## check nb argument
if type(nb_comments) is int and nb_comments < len(res_comments):
first_res_comments = res_comments[:nb_comments]
else:
first_res_comments = res_comments
else: #error
        errors.append(('ERR_WEBCOMMENT_RECID_INVALID', recID)) #!FIXME don't return error anywhere since search page
# comment
if recID >= 1:
comments = reviews = ""
if reported > 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif reported == 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
if CFG_WEBCOMMENT_ALLOW_COMMENTS: # normal comments
grouped_comments = group_comments_by_round(first_res_comments, ranking=0)
comments = webcomment_templates.tmpl_get_first_comments_without_ranking(recID, ln, grouped_comments, nb_res_comments, warnings)
if CFG_WEBCOMMENT_ALLOW_REVIEWS: # ranked comments
#calculate average score
avg_score = calculate_avg_score(res_reviews)
if voted > 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif voted == 0:
warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
            grouped_reviews = group_comments_by_round(first_res_reviews, ranking=1)
reviews = webcomment_templates.tmpl_get_first_comments_with_ranking(recID, ln, grouped_reviews, nb_res_reviews, avg_score, warnings)
return (comments, reviews)
# remark
else:
return(webcomment_templates.tmpl_get_first_remarks(first_res_comments, ln, nb_res_comments), None)
def group_comments_by_round(comments, ranking=0):
"""
Group comments by the round to which they belong
"""
comment_rounds = {}
ordered_comment_round_names = []
for comment in comments:
comment_round_name = ranking and comment[11] or comment[7]
        if comment_round_name not in comment_rounds:
comment_rounds[comment_round_name] = []
ordered_comment_round_names.append(comment_round_name)
comment_rounds[comment_round_name].append(comment)
return [(comment_round_name, comment_rounds[comment_round_name]) \
for comment_round_name in ordered_comment_round_names]
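# --- Illustrative sketch (not part of the original module) ---
# group_comments_by_round() preserves the order in which round names are
# first seen. With ranking=0 the round name is read from comment[7], with
# ranking=1 from comment[11]. Given rows whose round names appear in the
# order 'r1', 'r2', 'r1', the result would be:
#
#   [('r1', [<first r1 row>, <second r1 row>]),
#    ('r2', [<r2 row>])]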
def calculate_avg_score(res):
"""
private function
Calculate the avg score of reviews present in res
@param res: tuple of tuple returned from query_retrieve_comments_or_remarks
@return: a float of the average score rounded to the closest 0.5
"""
c_star_score = 6
avg_score = 0.0
nb_reviews = 0
for comment in res:
if comment[c_star_score] > 0:
avg_score += comment[c_star_score]
nb_reviews += 1
if nb_reviews == 0:
return 0.0
avg_score = avg_score / nb_reviews
avg_score_unit = avg_score - math.floor(avg_score)
if avg_score_unit < 0.25:
avg_score = math.floor(avg_score)
elif avg_score_unit > 0.75:
avg_score = math.floor(avg_score) + 1
else:
avg_score = math.floor(avg_score) + 0.5
if avg_score > 5:
avg_score = 5.0
return avg_score
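# --- Illustrative sketch (not part of the original module) ---
# calculate_avg_score() rounds the mean star score to the nearest 0.5:
#
#   scores (4, 5)          -> mean 4.50 -> 4.5  (0.25 <= fraction <= 0.75)
#   scores (4, 4, 5)       -> mean 4.33 -> 4.5
#   scores (4, 4, 4, 4, 5) -> mean 4.20 -> 4.0  (fraction < 0.25)
#   scores (5, 5)          -> mean 5.00 -> 5.0  (capped at 5)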
def perform_request_add_comment_or_remark(recID=0,
uid=-1,
action='DISPLAY',
ln=CFG_SITE_LANG,
msg=None,
score=None,
note=None,
priority=None,
reviews=0,
comID=-1,
client_ip_address=None,
editor_type='textarea',
can_attach_files=False,
subscribe=False,
req=None,
attached_files=None,
warnings=None,
errors=None):
"""
Add a comment/review or remark
@param recID: record id
@param uid: user id
@param action: 'DISPLAY' to display add form
'SUBMIT' to submit comment once form is filled
'REPLY' to reply to an existing comment
@param ln: language
@param msg: the body of the comment/review or remark
@param score: star score of the review
@param note: title of the review
@param priority: priority of remark (int)
@param reviews: boolean, if enabled will add a review, if disabled will add a comment
    @param comID: if replying, this is the comment id of the comment being replied to
@param editor_type: the kind of editor/input used for the comment: 'textarea', 'fckeditor'
@param can_attach_files: if user can attach files to comments or not
@param subscribe: if True, subscribe user to receive new comments by email
@param req: request object. Used to register callback to send email notification
@param attached_files: newly attached files to this comment, mapping filename to filepath
@type attached_files: dict
    @param warnings: list of standard warnings that should be considered
    @param errors: list of standard errors that should be considered
@return:
- html add form if action is display or reply
- html successful added form if action is submit
"""
if warnings is None:
warnings = []
if errors is None:
errors = []
actions = ['DISPLAY', 'REPLY', 'SUBMIT']
_ = gettext_set_language(ln)
## check arguments
check_recID_is_in_range(recID, warnings, ln)
if uid <= 0:
errors.append(('ERR_WEBCOMMENT_UID_INVALID', uid))
return ('', errors, warnings)
if attached_files is None:
attached_files = {}
user_contact_info = query_get_user_contact_info(uid)
nickname = ''
if user_contact_info:
if user_contact_info[0]:
nickname = user_contact_info[0]
# show the form
if action == 'DISPLAY':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
return (webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files), errors, warnings)
elif not reviews and CFG_WEBCOMMENT_ALLOW_COMMENTS:
return (webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files), errors, warnings)
else:
errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
elif action == 'REPLY':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
errors.append(('ERR_WEBCOMMENT_REPLY_REVIEW',))
return (webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files), errors, warnings)
elif not reviews and CFG_WEBCOMMENT_ALLOW_COMMENTS:
textual_msg = msg
if comID > 0:
comment = query_get_comment(comID)
if comment:
user_info = get_user_info(comment[2])
if user_info:
date_creation = convert_datetext_to_dategui(str(comment[4]))
# Build two msg: one mostly textual, the other one with HTML markup, for the FCKeditor.
msg = _("%(x_name)s wrote on %(x_date)s:")% {'x_name': user_info[2], 'x_date': date_creation}
textual_msg = msg
# 1 For FCKeditor input
msg += '<br /><br />'
msg += comment[3]
msg = email_quote_txt(text=msg)
msg = email_quoted_txt2html(text=msg)
msg = '<br/>' + msg + '<br/>'
# 2 For textarea input
textual_msg += "\n\n"
textual_msg += comment[3]
textual_msg = email_quote_txt(text=textual_msg)
return (webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, textual_msg, can_attach_files=can_attach_files, reply_to=comID), errors, warnings)
else:
errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
# check before submitting form
elif action == 'SUBMIT':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
if note.strip() in ["", "None"] and not CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
warnings.append(('WRN_WEBCOMMENT_ADD_NO_TITLE',))
if score == 0 or score > 5:
warnings.append(("WRN_WEBCOMMENT_ADD_NO_SCORE",))
if msg.strip() in ["", "None"] and not CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
warnings.append(('WRN_WEBCOMMENT_ADD_NO_BODY',))
# if no warnings, submit
if len(warnings) == 0:
if reviews:
if check_user_can_review(recID, client_ip_address, uid):
success = query_add_comment_or_remark(reviews, recID=recID, uid=uid, msg=msg,
note=note, score=score, priority=0,
client_ip_address=client_ip_address,
editor_type=editor_type,
req=req,
reply_to=comID)
else:
                    warnings.append(('WRN_WEBCOMMENT_CANNOT_REVIEW_TWICE',))
success = 1
else:
if check_user_can_comment(recID, client_ip_address, uid):
success = query_add_comment_or_remark(reviews, recID=recID, uid=uid, msg=msg,
note=note, score=score, priority=0,
client_ip_address=client_ip_address,
editor_type=editor_type,
req=req,
reply_to=comID, attached_files=attached_files)
if success > 0 and subscribe:
subscribe_user_to_discussion(recID, uid)
else:
                    warnings.append(('WRN_WEBCOMMENT_TIMELIMIT',))
success = 1
if success > 0:
if CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL > 0:
notify_admin_of_new_comment(comID=success)
return (webcomment_templates.tmpl_add_comment_successful(recID, ln, reviews, warnings, success), errors, warnings)
else:
                errors.append(('ERR_WEBCOMMENT_DB_INSERT_ERROR',))
            # if there are warnings or inserting the comment failed, re-display the form with the warnings
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
return (webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files), errors, warnings)
else:
return (webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files), errors, warnings)
    # unknown action: fall back to displaying the form
else:
warnings.append(('WRN_WEBCOMMENT_ADD_UNKNOWN_ACTION',))
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
            return (webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files), errors, warnings)
else:
            return (webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files), errors, warnings)
return ('', errors, warnings)
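# --- Illustrative sketch (not part of the original module) ---
# Typical action flow for perform_request_add_comment_or_remark():
#   action='DISPLAY' -> return the empty add-comment/review form
#   action='REPLY'   -> return the form pre-filled with the quoted parent
#                       comment (comID); replying to a review is an error
#   action='SUBMIT'  -> validate, store via query_add_comment_or_remark(),
#                       optionally subscribe the user and notify the admins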
def notify_admin_of_new_comment(comID):
"""
Sends an email to the admin with details regarding comment with ID = comID
"""
comment = query_get_comment(comID)
if len(comment) > 0:
(comID2,
id_bibrec,
id_user,
body,
date_creation,
star_score, nb_votes_yes, nb_votes_total,
title,
nb_abuse_reports, round_name, restriction) = comment
else:
return
user_info = query_get_user_contact_info(id_user)
if len(user_info) > 0:
(nickname, email, last_login) = user_info
        if not nickname:
nickname = email.split('@')[0]
else:
nickname = email = last_login = "ERROR: Could not retrieve"
review_stuff = '''
Star score = %s
Title = %s''' % (star_score, title)
washer = EmailWasher()
body = washer.wash(body)
record_info = webcomment_templates.tmpl_email_new_comment_admin(id_bibrec)
out = '''
The following %(comment_or_review)s has just been posted (%(date)s).
AUTHOR:
Nickname = %(nickname)s
Email = %(email)s
User ID = %(uid)s
RECORD CONCERNED:
Record ID = %(recID)s
URL = <%(siteurl)s/record/%(recID)s/%(comments_or_reviews)s/>
%(record_details)s
%(comment_or_review_caps)s:
%(comment_or_review)s ID = %(comID)s %(review_stuff)s
Body =
<--------------->
%(body)s
<--------------->
ADMIN OPTIONS:
To moderate the %(comment_or_review)s go to %(siteurl)s/record/%(recID)s/%(comments_or_reviews)s/display?%(arguments)s
''' % \
{ 'comment_or_review' : star_score > 0 and 'review' or 'comment',
'comment_or_review_caps': star_score > 0 and 'REVIEW' or 'COMMENT',
'comments_or_reviews' : star_score > 0 and 'reviews' or 'comments',
'date' : date_creation,
'nickname' : nickname,
'email' : email,
'uid' : id_user,
'recID' : id_bibrec,
'record_details' : record_info,
'comID' : comID2,
'review_stuff' : star_score > 0 and review_stuff or "",
'body' : body.replace('<br />','\n'),
'siteurl' : CFG_SITE_URL,
'arguments' : 'ln=en&do=od#%s' % comID
}
from_addr = '%s WebComment <%s>' % (CFG_SITE_NAME, CFG_WEBALERT_ALERT_ENGINE_EMAIL)
comment_collection = get_comment_collection(comID)
to_addrs = get_collection_moderators(comment_collection)
rec_collection = guess_primary_collection_of_a_record(id_bibrec)
report_nums = get_fieldvalues(id_bibrec, "037__a")
report_nums += get_fieldvalues(id_bibrec, "088__a")
report_nums = ', '.join(report_nums)
subject = "A new comment/review has just been posted [%s|%s]" % (rec_collection, report_nums)
send_email(from_addr, to_addrs, subject, out)
def check_recID_is_in_range(recID, warnings=None, ln=CFG_SITE_LANG):
    """
    Check that recID is a strictly positive integer.
    Appends error messages to the warnings list.
    @param recID: record id
    @param warnings: the warnings list of the calling function
    @return: tuple (boolean, html) where boolean (1=true, 0=false)
             and html is the body of the page to display if there was a problem
    """
    # Make warnings into a list if needed
    if warnings is None:
        warnings = []
    elif type(warnings) is not list:
        warnings = [warnings]
    try:
        recID = int(recID)
    except (ValueError, TypeError):
        pass
if type(recID) is int:
if recID > 0:
from invenio.search_engine import record_exists
success = record_exists(recID)
if success == 1:
return (1,"")
else:
warnings.append(('ERR_WEBCOMMENT_RECID_INEXISTANT', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='inexistant', recID=recID, ln=ln))
elif recID == 0:
warnings.append(('ERR_WEBCOMMENT_RECID_MISSING',))
return (0, webcomment_templates.tmpl_record_not_found(status='missing', recID=recID, ln=ln))
else:
warnings.append(('ERR_WEBCOMMENT_RECID_INVALID', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='invalid', recID=recID, ln=ln))
else:
warnings.append(('ERR_WEBCOMMENT_RECID_NAN', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='nan', recID=recID, ln=ln))
def check_int_arg_is_in_range(value, name, errors, gte_value, lte_value=None):
"""
Check that variable with name 'name' >= gte_value and optionally <= lte_value
Append error messages to errors list
@param value: variable value
@param name: variable name
@param errors: list of error tuples (error_id, value)
@param gte_value: greater than or equal to value
@param lte_value: less than or equal to value
@return: boolean (1=true, 0=false)
"""
# Make errors into a list if needed
if type(errors) is not list:
errors = [errors]
    if type(gte_value) is not int:
        errors.append(('ERR_WEBCOMMENT_PROGRAMNING_ERROR',))
        return 0
if type(value) is not int:
errors.append(('ERR_WEBCOMMENT_ARGUMENT_NAN', value))
return 0
if value < gte_value:
errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
return 0
    if lte_value is not None:
if type(lte_value) is not int:
errors.append(('ERR_WEBCOMMENT_PROGRAMNING_ERROR',))
return 0
if value > lte_value:
errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
return 0
return 1
def get_mini_reviews(recid, ln=CFG_SITE_LANG):
"""
Returns the web controls to add reviews to a record from the
detailed record pages mini-panel.
@param recid: the id of the displayed record
@param ln: the user's language
"""
if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
action = 'SUBMIT'
else:
action = 'DISPLAY'
reviews = query_retrieve_comments_or_remarks(recid, ranking=1)
return webcomment_templates.tmpl_mini_review(recid, ln, action=action,
avg_score=calculate_avg_score(reviews),
nb_comments_total=len(reviews))
def check_user_can_view_comments(user_info, recid):
"""Check if the user is authorized to view comments for given
recid.
Returns the same type as acc_authorize_action
"""
# Check user can view the record itself first
(auth_code, auth_msg) = check_user_can_view_record(user_info, recid)
if auth_code:
return (auth_code, auth_msg)
# Check if user can view the comments
## But first can we find an authorization for this case action,
## for this collection?
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'viewcomment', authorized_if_no_roles=True, collection=record_primary_collection)
def check_user_can_view_comment(user_info, comid, restriction=None):
"""Check if the user is authorized to view a particular comment,
given the comment restriction. Note that this function does not
check if the record itself is restricted to the user, which would
mean that the user should not see the comment.
You can omit 'comid' if you already know the 'restriction'
@param user_info: the user info object
@param comid: the comment id of that we want to check
@param restriction: the restriction applied to given comment (if known. Otherwise retrieved automatically)
@return: the same type as acc_authorize_action
"""
if restriction is None:
comment = query_get_comment(comid)
if comment:
restriction = comment[11]
else:
return (1, 'Comment %i does not exist' % comid)
if restriction == "":
return (0, '')
return acc_authorize_action(user_info, 'viewrestrcomment', status=restriction)
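# --- Illustrative sketch (not part of the original module) ---
# Like acc_authorize_action(), these check_user_can_* helpers return a
# tuple (auth_code, auth_msg):
#   (0, '')             -> authorized
#   (non-zero, message) -> not authorized; message explains why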
def check_user_can_send_comments(user_info, recid):
"""Check if the user is authorized to comment the given
recid. This function does not check that user can view the record
or view the comments
Returns the same type as acc_authorize_action
"""
## First can we find an authorization for this case, action + collection
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'sendcomment', authorized_if_no_roles=True, collection=record_primary_collection)
def check_user_can_attach_file_to_comments(user_info, recid):
"""Check if the user is authorized to attach a file to comments
for given recid. This function does not check that user can view
the comments or send comments.
Returns the same type as acc_authorize_action
"""
## First can we find an authorization for this case action, for
## this collection?
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'attachcommentfile', authorized_if_no_roles=False, collection=record_primary_collection)
| valkyriesavage/invenio | modules/webcomment/lib/webcomment.py | Python | gpl-2.0 | 77,954 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.automate.explorer import Class, Domain, Method, Namespace
from cfme.automate.simulation import simulate
from cfme.common.provider import cleanup_vm
from cfme.services import requests
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.services.myservice import MyService
from utils import testgen
from utils.log import logger
from utils.wait import wait_for
pytestmark = [
test_requirements.service,
pytest.mark.usefixtures("vm_name"),
pytest.mark.usefixtures("catalog_item"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.long_running,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.tier(3)
]
pytest_generate_tests = testgen.generate(testgen.provider_by_type, ['virtualcenter'],
scope="module")
@pytest.fixture(scope="function")
def copy_domain(request):
domain = Domain(name=fauxfactory.gen_alphanumeric(), enabled=True)
domain.create()
request.addfinalizer(lambda: domain.delete() if domain.exists() else None)
return domain
@pytest.fixture
def myservice(setup_provider, provider, catalog_item, request):
vm_name = catalog_item.provisioning_data["vm_name"]
request.addfinalizer(lambda: cleanup_vm(vm_name + "_0001", provider))
catalog_item.create()
service_catalogs = ServiceCatalogs("service_name")
service_catalogs.order(catalog_item.catalog.name, catalog_item)
logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
row_description = catalog_item.name
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells, True],
fail_func=requests.reload, num_sec=900, delay=20)
assert row.request_state.text == 'Finished'
return MyService(catalog_item.name, vm_name)
def test_add_vm_to_service(myservice, request, copy_domain):
"""Tests adding vm to service
Metadata:
test_flag: provision
"""
method_torso = """
def add_to_service
vm = $evm.root['vm']
service = $evm.vmdb('service').find_by_name('{}')
user = $evm.root['user']
if service && vm
$evm.log('info', "XXXXXXXX Attaching Service to VM: [#{{service.name}}][#{{vm.name}}]")
vm.add_to_service(service)
vm.owner = user if user
vm.group = user.miq_group if user
end
end
$evm.log("info", "Listing Root Object Attributes:")
$evm.log("info", "===========================================")
add_to_service
""".format(myservice.service_name)
method = Method(
name="InspectMe",
data=method_torso,
cls=Class(
name="Request",
namespace=Namespace(
name="System",
parent=copy_domain
)
)
)
method.create()
request.addfinalizer(lambda: method.delete() if method.exists() else None)
simulate(
instance="Request",
message="create",
request=method.name,
attribute=["VM and Instance", "auto_test_services"], # Random selection, does not matter
execute_methods=True
)
myservice.check_vm_add("auto_test_services")
request.addfinalizer(lambda: myservice.delete(myservice.service_name))
| kzvyahin/cfme_tests | cfme/tests/services/test_add_remove_vm_to_service.py | Python | gpl-2.0 | 3,341 |
#
# Copyright (c) 2010 Mikhail Gusarov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
path.py - An object representing a path to a file or directory.
https://github.com/jaraco/path.py
Example::
from path import Path
d = Path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0o755)
"""
from __future__ import unicode_literals
import contextlib
import errno
import functools
import glob
import hashlib
import importlib
import io
import operator
import shutil
import sys
import tempfile
import codecs
import fnmatch
import os
import re
import warnings
from distutils import dir_util
try:
import win32security
except ImportError:
pass
try:
import pwd
except ImportError:
pass
try:
import grp
except ImportError:
pass
##############################################################################
# Python 2/3 support
PY3 = sys.version_info >= (3,)
PY2 = not PY3
string_types = str,
text_type = str
getcwdu = os.getcwd
def surrogate_escape(error):
"""
Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only.
"""
chars = error.object[error.start:error.end]
assert len(chars) == 1
val = ord(chars)
val += 0xdc00
return __builtin__.unichr(val), error.end
if PY2:
import __builtin__
string_types = __builtin__.basestring,
text_type = __builtin__.unicode
getcwdu = os.getcwdu
codecs.register_error('surrogateescape', surrogate_escape)
@contextlib.contextmanager
def io_error_compat():
try:
yield
except IOError as io_err:
# On Python 2, io.open raises IOError; transform to OSError for
# future compatibility.
os_err = OSError(*io_err.args)
os_err.filename = getattr(io_err, 'filename', None)
raise os_err
##############################################################################
__all__ = ['Path', 'CaseInsensitivePattern']
LINESEPS = ['\r\n', '\r', '\n']
U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
NEWLINE = re.compile('|'.join(LINESEPS))
U_NEWLINE = re.compile('|'.join(U_LINESEPS))
NL_END = re.compile(r'(?:{0})$'.format(NEWLINE.pattern))
U_NL_END = re.compile(r'(?:{0})$'.format(U_NEWLINE.pattern))
try:
import pkg_resources
__version__ = pkg_resources.require('path.py')[0].version
except ImportError:
__version__ = 'unknown'
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
class TreeWalkWarning(Warning):
pass
# from jaraco.functools
def compose(*funcs):
compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs)
def simple_cache(func):
"""
    Save results for the :meth:`Path.using_module` classmethod.
When Python 3.2 is available, use functools.lru_cache instead.
"""
saved_results = {}
def wrapper(cls, module):
if module in saved_results:
return saved_results[module]
saved_results[module] = func(cls, module)
return saved_results[module]
return wrapper
class ClassProperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class multimethod(object):
"""
Acts like a classmethod when invoked from the class and like an
instancemethod when invoked from the instance.
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return (
functools.partial(self.func, owner) if instance is None
else functools.partial(self.func, owner, instance)
)
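# --- Illustrative sketch (not part of the original module) ---
# multimethod is what lets joinpath() below be called both ways:
#
#   Path.joinpath('/usr', 'local')    # invoked on the class
#   Path('/usr').joinpath('local')    # invoked on an instance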
class Path(text_type):
"""
Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in :mod:`os.path`.
Some methods are additionally included from :mod:`shutil`.
The functions are linked directly into the class namespace
such that they will be bound to the Path instance. For example,
``Path(src).copy(target)`` is equivalent to
``shutil.copy(src, target)``. Therefore, when referencing
the docs for these methods, assume `src` references `self`,
the Path instance.
"""
module = os.path
""" The path module to use for path operations.
.. seealso:: :mod:`os.path`
"""
def __init__(self, other=''):
if other is None:
raise TypeError("Invalid initial value for path: None")
@classmethod
@simple_cache
def using_module(cls, module):
subclass_name = cls.__name__ + '_' + module.__name__
if PY2:
subclass_name = str(subclass_name)
bases = (cls,)
ns = {'module': module}
return type(subclass_name, bases, ns)
@ClassProperty
@classmethod
def _next_class(cls):
"""
What class should be used to construct new instances from this class
"""
return cls
@classmethod
def _always_unicode(cls, path):
"""
Ensure the path as retrieved from a Python API, such as :func:`os.listdir`,
is a proper Unicode string.
"""
if PY3 or isinstance(path, text_type):
return path
return path.decode(sys.getfilesystemencoding(), 'surrogateescape')
# --- Special Python methods.
def __repr__(self):
return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__())
# Adding a Path and a string yields a Path.
def __add__(self, more):
try:
return self._next_class(super(Path, self).__add__(more))
except TypeError: # Python bug
return NotImplemented
def __radd__(self, other):
if not isinstance(other, string_types):
return NotImplemented
return self._next_class(other.__add__(self))
# The / operator joins Paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
.. seealso:: :func:`os.path.join`
"""
return self._next_class(self.module.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
# The / operator joins Paths the other way around
def __rdiv__(self, rel):
""" fp.__rdiv__(rel) == rel / fp
Join two path components, adding a separator character if
needed.
.. seealso:: :func:`os.path.join`
"""
return self._next_class(self.module.join(rel, self))
# Make the / operator work even when true division is enabled.
__rtruediv__ = __rdiv__
def __enter__(self):
self._old_dir = self.getcwd()
os.chdir(self)
return self
def __exit__(self, *_):
os.chdir(self._old_dir)
@classmethod
def getcwd(cls):
""" Return the current working directory as a path object.
.. seealso:: :func:`os.getcwdu`
"""
return cls(getcwdu())
#
# --- Operations on Path strings.
def abspath(self):
""" .. seealso:: :func:`os.path.abspath` """
return self._next_class(self.module.abspath(self))
def normcase(self):
""" .. seealso:: :func:`os.path.normcase` """
return self._next_class(self.module.normcase(self))
def normpath(self):
""" .. seealso:: :func:`os.path.normpath` """
return self._next_class(self.module.normpath(self))
def realpath(self):
""" .. seealso:: :func:`os.path.realpath` """
return self._next_class(self.module.realpath(self))
def expanduser(self):
""" .. seealso:: :func:`os.path.expanduser` """
return self._next_class(self.module.expanduser(self))
def expandvars(self):
""" .. seealso:: :func:`os.path.expandvars` """
return self._next_class(self.module.expandvars(self))
def dirname(self):
""" .. seealso:: :attr:`parent`, :func:`os.path.dirname` """
return self._next_class(self.module.dirname(self))
def basename(self):
""" .. seealso:: :attr:`name`, :func:`os.path.basename` """
return self._next_class(self.module.basename(self))
def expand(self):
""" Clean up a filename by calling :meth:`expandvars()`,
:meth:`expanduser()`, and :meth:`normpath()` on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
@property
def namebase(self):
""" The same as :meth:`name`, but with one file extension stripped off.
For example,
``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``,
but
``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``.
"""
base, ext = self.module.splitext(self.name)
return base
@property
def ext(self):
""" The file extension, for example ``'.py'``. """
f, ext = self.module.splitext(self)
return ext
@property
def drive(self):
""" The drive specifier, for example ``'C:'``.
This is always empty on systems that don't use drive specifiers.
"""
drive, r = self.module.splitdrive(self)
return self._next_class(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new Path object.
For example,
``Path('/usr/local/lib/libpython.so').parent ==
Path('/usr/local/lib')``
.. seealso:: :meth:`dirname`, :func:`os.path.dirname`
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example,
``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
.. seealso:: :meth:`basename`, :func:`os.path.basename`
""")
def splitpath(self):
""" p.splitpath() -> Return ``(p.parent, p.name)``.
.. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
"""
parent, child = self.module.split(self)
return self._next_class(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return ``(p.drive, <the rest of p>)``.
Split the drive specifier from this path. If there is
no drive specifier, :samp:`{p.drive}` is empty, so the return value
is simply ``(Path(''), p)``. This is always the case on Unix.
.. seealso:: :func:`os.path.splitdrive`
"""
drive, rel = self.module.splitdrive(self)
return self._next_class(drive), rel
def splitext(self):
""" p.splitext() -> Return ``(p.stripext(), p.ext)``.
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from ``'.'`` to the end of the
last path segment. This has the property that if
``(a, b) == p.splitext()``, then ``a + b == p``.
.. seealso:: :func:`os.path.splitext`
"""
filename, ext = self.module.splitext(self)
return self._next_class(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, ``Path('/home/guido/python.tar.gz').stripext()``
returns ``Path('/home/guido/python.tar')``.
"""
return self.splitext()[0]
def splitunc(self):
""" .. seealso:: :func:`os.path.splitunc` """
unc, rest = self.module.splitunc(self)
return self._next_class(unc), rest
@property
def uncshare(self):
"""
The UNC mount point for this path.
This is empty for paths on local drives.
"""
unc, r = self.module.splitunc(self)
return self._next_class(unc)
@multimethod
def joinpath(cls, first, *others):
"""
Join first to zero or more :class:`Path` components, adding a separator
character (:samp:`{first}.module.sep`) if needed. Returns a new instance of
:samp:`{first}._next_class`.
.. seealso:: :func:`os.path.join`
"""
if not isinstance(first, cls):
first = cls(first)
return first._next_class(first.module.join(first, *others))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a Path. Its value will be
either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
directory of this path (for example, ``'/'`` or ``'C:\\'``). The
other items in the list will be strings.
``path.Path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
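    # --- Illustrative sketch (not part of the original module) ---
    # splitall() and joinpath() round-trip (POSIX separators shown):
    #
    #   Path('/usr/local/lib').splitall()
    #   -> [Path('/'), 'usr', 'local', 'lib']
    #   Path.joinpath(*Path('/usr/local/lib').splitall())
    #   -> Path('/usr/local/lib')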
def relpath(self, start='.'):
""" Return this path as a relative path,
based from `start`, which defaults to the current working directory.
"""
cwd = self._next_class(start)
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from `self` to `dest`.
If there is no relative path from `self` to `dest`, for example if
they reside on different drives in Windows, then this returns
``dest.abspath()``.
"""
origin = self.abspath()
dest = self._next_class(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != self.module.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != self.module.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = self.module.join(*segments)
return self._next_class(relpath)
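    # --- Illustrative sketch (not part of the original module) ---
    # relpathto() examples (POSIX separators shown):
    #
    #   Path('/home/guido').relpathto('/home/guido/bin/python')
    #   -> Path('bin/python')
    #   Path('/home/guido/bin').relpathto('/home/other')
    #   -> Path('../../other')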
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use :meth:`files` or :meth:`dirs` instead if you want a listing
of just files or just subdirectories.
The elements of the list are Path objects.
With the optional `pattern` argument, this only lists
items whose names match the given pattern.
.. seealso:: :meth:`files`, :meth:`dirs`
"""
if pattern is None:
pattern = '*'
return [
self / child
for child in map(self._always_unicode, os.listdir(self))
if self._next_class(child).fnmatch(pattern)
]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are Path objects.
This does not walk recursively into subdirectories
(but see :meth:`walkdirs`).
With the optional `pattern` argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
With the optional `pattern` argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields Path objects naming each child item of
this directory and its descendants. This requires that
``D.isdir()``.
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The `errors=` keyword argument controls behavior when an
error occurs. The default is ``'strict'``, which causes an
exception. Other allowed values are ``'warn'`` (which
reports the error via :func:`warnings.warn()`), and ``'ignore'``.
`errors` may also be an arbitrary callable taking a msg parameter.
"""
class Handlers:
def strict(msg):
raise
def warn(msg):
warnings.warn(msg, TreeWalkWarning)
def ignore(msg):
pass
if not callable(errors) and errors not in vars(Handlers):
raise ValueError("invalid errors parameter")
errors = vars(Handlers).get(errors, errors)
try:
childList = self.listdir()
except Exception:
exc = sys.exc_info()[1]
tmpl = "Unable to list directory '%(self)s': %(exc)s"
msg = tmpl % locals()
errors(msg)
return
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
exc = sys.exc_info()[1]
tmpl = "Unable to access '%(child)s': %(exc)s"
msg = tmpl % locals()
errors(msg)
isdir = False
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional `pattern` argument, this yields only
directories whose names match the given pattern. For
example, ``mydir.walkdirs('*test')`` yields only directories
with names ending in ``'test'``.
The `errors=` keyword argument controls behavior when an
error occurs. The default is ``'strict'``, which causes an
exception. The other allowed values are ``'warn'`` (which
reports the error via :func:`warnings.warn()`), and ``'ignore'``.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument `pattern` limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp``
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
            except Exception:
                if errors == 'ignore':
                    continue
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern, normcase=None):
""" Return ``True`` if `self.name` matches the given `pattern`.
`pattern` - A filename pattern with wildcards,
for example ``'*.py'``. If the pattern contains a `normcase`
attribute, it is applied to the name and path prior to comparison.
        `normcase` - (optional) A function used to normalize the pattern and
            filename before matching. Defaults to ``self.module.normcase``,
            which itself defaults to :func:`os.path.normcase`.
.. seealso:: :func:`fnmatch.fnmatch`
"""
default_normcase = getattr(pattern, 'normcase', self.module.normcase)
normcase = normcase or default_normcase
name = normcase(self.name)
pattern = normcase(pattern)
return fnmatch.fnmatchcase(name, pattern)
def glob(self, pattern):
""" Return a list of Path objects that match the pattern.
`pattern` - a path relative to this directory, with wildcards.
For example, ``Path('/users').glob('*/bin/*')`` returns a list
of all the files users have in their :file:`bin` directories.
.. seealso:: :func:`glob.glob`
"""
cls = self._next_class
return [cls(s) for s in glob.glob(self / pattern)]
#
# --- Reading or writing an entire file at once.
def open(self, *args, **kwargs):
""" Open this file and return a corresponding :class:`file` object.
Keyword arguments work as in :func:`io.open`. If the file cannot be
opened, an :class:`~exceptions.OSError` is raised.
"""
with io_error_compat():
return io.open(self, *args, **kwargs)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
with self.open('rb') as f:
return f.read()
def chunks(self, size, *args, **kwargs):
""" Returns a generator yielding chunks of the file, so it can
be read piece by piece with a simple for loop.
Any argument you pass after `size` will be passed to :meth:`open`.
:example:
>>> hash = hashlib.md5()
>>> for chunk in Path("path.py").chunks(8192, mode='rb'):
... hash.update(chunk)
This will read the file by chunks of 8192 bytes.
"""
with self.open(*args, **kwargs) as f:
for chunk in iter(lambda: f.read(size) or None, None):
yield chunk
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call ``p.write_bytes(bytes, append=True)`` to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
with self.open(mode) as f:
f.write(bytes)
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
All newline sequences are converted to ``'\n'``. Keyword arguments
will be passed to :meth:`open`.
.. seealso:: :meth:`lines`
"""
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return U_NEWLINE.sub('\n', f.read())
def write_text(self, text, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the `append=True` keyword argument.
There are two differences between :meth:`write_text` and
:meth:`write_bytes`: newline handling and Unicode handling.
See below.
Parameters:
`text` - str/unicode - The text to be written.
`encoding` - str - The Unicode encoding that will be used.
This is ignored if `text` isn't a Unicode string.
`errors` - str - How to handle Unicode encoding errors.
Default is ``'strict'``. See ``help(unicode.encode)`` for the
options. This is ignored if `text` isn't a Unicode
string.
`linesep` - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
:data:`os.linesep`. You can also specify ``None`` to
leave all newlines as they are in `text`.
`append` - keyword argument - bool - Specifies what to do if
the file already exists (``True``: append to the end of it;
``False``: overwrite it.) The default is ``False``.
--- Newline handling.
``write_text()`` converts all standard end-of-line sequences
(``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
the end-of-line marker is ``'\r\n'``).
If you don't like your platform's default, you can override it
using the `linesep=` keyword argument. If you specifically want
``write_text()`` to preserve the newlines as-is, use ``linesep=None``.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``.
(This is slightly different from when you open a file for
writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')``
in Python.)
--- Unicode
If `text` isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The `encoding` and
`errors` arguments are not used and must be omitted.
If `text` is Unicode, it is first converted to :func:`bytes` using the
specified `encoding` (or the default encoding if `encoding`
isn't specified). The `errors` argument applies only to this
conversion.
"""
if isinstance(text, text_type):
if linesep is not None:
text = U_NEWLINE.sub(linesep, text)
text = text.encode(encoding or sys.getdefaultencoding(), errors)
else:
assert encoding is None
text = NEWLINE.sub(linesep, text)
self.write_bytes(text, append=append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
`encoding` - The Unicode encoding (or character set) of
the file. The default is ``None``, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
`errors` - How to handle Unicode errors; see help(str.decode)
for the options. Default is ``'strict'``.
`retain` - If ``True``, retain newline characters; but all newline
character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
translated to ``'\n'``. If ``False``, newline characters are
stripped off. Default is ``True``.
This uses ``'U'`` mode.
.. seealso:: :meth:`text`
"""
if encoding is None and retain:
with self.open('U') as f:
return f.readlines()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See `linesep` below.
`lines` - A list of strings.
`encoding` - A Unicode encoding to use. This applies only if
`lines` contains any Unicode strings.
`errors` - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
be stripped off and this will be used instead. The
default is os.linesep, which is platform-dependent
(``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
Specify ``None`` to write the lines as-is, like
:meth:`file.writelines`.
Use the keyword argument ``append=True`` to append lines to the
file. The default is to overwrite the file.
.. warning ::
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the `encoding=` parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
with self.open('ab' if append else 'wb') as f:
for l in lines:
isUnicode = isinstance(l, text_type)
if linesep is not None:
pattern = U_NL_END if isUnicode else NL_END
l = pattern.sub('', l) + linesep
if isUnicode:
l = l.encode(encoding or sys.getdefaultencoding(), errors)
f.write(l)
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
.. seealso:: :meth:`read_hash`
"""
return self.read_hash('md5')
def _hash(self, hash_name):
""" Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``)
that's available in the :mod:`hashlib` module.
"""
m = hashlib.new(hash_name)
for chunk in self.chunks(8192, mode="rb"):
m.update(chunk)
return m
def read_hash(self, hash_name):
""" Calculate given hash for this file.
List of supported hashes can be obtained from :mod:`hashlib` package.
This reads the entire file.
.. seealso:: :meth:`hashlib.hash.digest`
"""
return self._hash(hash_name).digest()
def read_hexhash(self, hash_name):
""" Calculate given hash for this file, returning hexdigest.
List of supported hashes can be obtained from :mod:`hashlib` package.
This reads the entire file.
.. seealso:: :meth:`hashlib.hash.hexdigest`
"""
return self._hash(hash_name).hexdigest()
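    # --- Illustrative sketch (not part of the original module) ---
    #   Path('setup.py').read_hexhash('sha1')  # hex digest string
    #   Path('setup.py').read_hash('md5')      # raw digest bytes
    # Both stream the file in 8192-byte chunks via _hash() and chunks(),
    # so even large files are never read into memory at once.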
# --- Methods for querying the filesystem.
# N.B. On some platforms, the os.path functions may be implemented in C
# (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
# bound. Playing it safe and wrapping them all in method calls.
def isabs(self):
""" .. seealso:: :func:`os.path.isabs` """
return self.module.isabs(self)
def exists(self):
""" .. seealso:: :func:`os.path.exists` """
return self.module.exists(self)
def isdir(self):
""" .. seealso:: :func:`os.path.isdir` """
return self.module.isdir(self)
def isfile(self):
""" .. seealso:: :func:`os.path.isfile` """
return self.module.isfile(self)
def islink(self):
""" .. seealso:: :func:`os.path.islink` """
return self.module.islink(self)
def ismount(self):
""" .. seealso:: :func:`os.path.ismount` """
return self.module.ismount(self)
def samefile(self, other):
""" .. seealso:: :func:`os.path.samefile` """
if not hasattr(self.module, 'samefile'):
other = Path(other).realpath().normpath().normcase()
return self.realpath().normpath().normcase() == other
return self.module.samefile(self, other)
def getatime(self):
""" .. seealso:: :attr:`atime`, :func:`os.path.getatime` """
return self.module.getatime(self)
atime = property(
getatime, None, None,
""" Last access time of the file.
.. seealso:: :meth:`getatime`, :func:`os.path.getatime`
""")
def getmtime(self):
""" .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """
return self.module.getmtime(self)
mtime = property(
getmtime, None, None,
""" Last-modified time of the file.
.. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
""")
def getctime(self):
""" .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """
return self.module.getctime(self)
ctime = property(
getctime, None, None,
""" Creation time of the file.
.. seealso:: :meth:`getctime`, :func:`os.path.getctime`
""")
def getsize(self):
""" .. seealso:: :attr:`size`, :func:`os.path.getsize` """
return self.module.getsize(self)
size = property(
getsize, None, None,
""" Size of the file, in bytes.
.. seealso:: :meth:`getsize`, :func:`os.path.getsize`
""")
if hasattr(os, 'access'):
def access(self, mode):
""" Return ``True`` if current user has access to this path.
mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`,
:data:`os.W_OK`, :data:`os.X_OK`
.. seealso:: :func:`os.access`
"""
return os.access(self, mode)
def stat(self):
""" Perform a ``stat()`` system call on this path.
.. seealso:: :meth:`lstat`, :func:`os.stat`
"""
return os.stat(self)
def lstat(self):
""" Like :meth:`stat`, but do not follow symbolic links.
.. seealso:: :meth:`stat`, :func:`os.lstat`
"""
return os.lstat(self)
def __get_owner_windows(self):
"""
Return the name of the owner of this file or directory. Follow
symbolic links.
Return a name of the form ``r'DOMAIN\\User Name'``; may be a group.
.. seealso:: :attr:`owner`
"""
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + '\\' + account
def __get_owner_unix(self):
"""
Return the name of the owner of this file or directory. Follow
symbolic links.
.. seealso:: :attr:`owner`
"""
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
def __get_owner_not_implemented(self):
raise NotImplementedError("Ownership not available on this platform.")
if 'win32security' in globals():
get_owner = __get_owner_windows
elif 'pwd' in globals():
get_owner = __get_owner_unix
else:
get_owner = __get_owner_not_implemented
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory.
.. seealso:: :meth:`get_owner`""")
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a ``statvfs()`` system call on this path.
.. seealso:: :func:`os.statvfs`
"""
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
""" .. seealso:: :func:`os.pathconf` """
return os.pathconf(self, name)
#
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file.
.. seealso:: :func:`os.utime`
"""
os.utime(self, times)
return self
def chmod(self, mode):
"""
Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
.. seealso:: :func:`os.chmod`
"""
if isinstance(mode, string_types):
mask = _multi_permission_mask(mode)
mode = mask(self.stat().st_mode)
os.chmod(self, mode)
return self
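    # --- Illustrative sketch (not part of the original module) ---
    # chmod() accepts either a numeric mode or a symbolic one; symbolic
    # modes are translated by the module-level helper
    # _multi_permission_mask() (defined outside this excerpt):
    #
    #   Path('script.sh').chmod(0o755)   # numeric, as os.chmod
    #   Path('script.sh').chmod('u+x')   # symbolic, applied to st_mode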
def chown(self, uid=-1, gid=-1):
"""
Change the owner and group by names rather than the uid or gid numbers.
.. seealso:: :func:`os.chown`
"""
if hasattr(os, 'chown'):
if 'pwd' in globals() and isinstance(uid, string_types):
uid = pwd.getpwnam(uid).pw_uid
if 'grp' in globals() and isinstance(gid, string_types):
gid = grp.getgrnam(gid).gr_gid
os.chown(self, uid, gid)
else:
raise NotImplementedError("Ownership not available on this platform.")
return self
def rename(self, new):
""" .. seealso:: :func:`os.rename` """
os.rename(self, new)
return self._next_class(new)
def renames(self, new):
""" .. seealso:: :func:`os.renames` """
os.renames(self, new)
return self._next_class(new)
#
# --- Create/delete operations on directories
def mkdir(self, mode=0o777):
""" .. seealso:: :func:`os.mkdir` """
os.mkdir(self, mode)
return self
def mkdir_p(self, mode=0o777):
""" Like :meth:`mkdir`, but does not raise an exception if the
directory already exists. """
try:
self.mkdir(mode)
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.EEXIST:
raise
return self
def makedirs(self, mode=0o777):
""" .. seealso:: :func:`os.makedirs` """
os.makedirs(self, mode)
return self
def makedirs_p(self, mode=0o777):
""" Like :meth:`makedirs`, but does not raise an exception if the
directory already exists. """
try:
self.makedirs(mode)
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.EEXIST:
raise
return self
def rmdir(self):
""" .. seealso:: :func:`os.rmdir` """
os.rmdir(self)
return self
def rmdir_p(self):
""" Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.rmdir()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
return self
def removedirs(self):
""" .. seealso:: :func:`os.removedirs` """
os.removedirs(self)
return self
def removedirs_p(self):
""" Like :meth:`removedirs`, but does not raise an exception if the
directory is not empty or does not exist. """
try:
self.removedirs()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
return self
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
os.close(fd)
os.utime(self, None)
return self
def remove(self):
""" .. seealso:: :func:`os.remove` """
os.remove(self)
return self
def remove_p(self):
""" Like :meth:`remove`, but does not raise an exception if the
file does not exist. """
try:
self.unlink()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
return self
def unlink(self):
""" .. seealso:: :func:`os.unlink` """
os.unlink(self)
return self
def unlink_p(self):
""" Like :meth:`unlink`, but does not raise an exception if the
file does not exist. """
self.remove_p()
return self
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at `newpath`, pointing to this file.
.. seealso:: :func:`os.link`
"""
os.link(self, newpath)
return self._next_class(newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at `newlink`, pointing here.
.. seealso:: :func:`os.symlink`
"""
os.symlink(self, newlink)
return self._next_class(newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
.. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
"""
return self._next_class(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
.. seealso:: :meth:`readlink`, :func:`os.readlink`
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# High-level functions from shutil
# These functions will be bound to the instance such that
# Path(name).copy(target) will invoke shutil.copy(name, target)
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
def rmtree_p(self):
""" Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist. """
try:
self.rmtree()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
return self
def chdir(self):
""" .. seealso:: :func:`os.chdir` """
os.chdir(self)
cd = chdir
def merge_tree(self, dst, symlinks=False, *args, **kwargs):
"""
Copy entire contents of self to dst, overwriting existing
contents in dst with those in self.
If the additional keyword `update` is True, each
`src` will only be copied if `dst` does not exist,
or `src` is newer than `dst`.
Note that the technique employed stages the files in a temporary
directory first, so this function is not suitable for merging
trees with large files, especially if the temporary directory
is not capable of storing a copy of the entire source tree.
"""
update = kwargs.pop('update', False)
with tempdir() as _temp_dir:
# first copy the tree to a stage directory to support
# the parameters and behavior of copytree.
stage = _temp_dir / str(hash(self))
self.copytree(stage, symlinks, *args, **kwargs)
# now copy everything from the stage directory using
# the semantics of dir_util.copy_tree
dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks,
update=update)
#
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
""" .. seealso:: :func:`os.chroot` """
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
""" .. seealso:: :func:`os.startfile` """
os.startfile(self)
return self
# in-place re-writing, courtesy of Martijn Pieters
# http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
@contextlib.contextmanager
def in_place(self, mode='r', buffering=-1, encoding=None, errors=None,
newline=None, backup_extension=None):
"""
A context in which a file may be re-written in-place with new content.
Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable`
replaces `readable`.
If an exception occurs, the old file is restored, removing the
written data.
        Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only modes are
allowed. A :exc:`ValueError` is raised on invalid modes.
For example, to add line numbers to a file::
p = Path(filename)
assert p.isfile()
with p.in_place() as (reader, writer):
for number, line in enumerate(reader, 1):
                writer.write('{0:3}: '.format(number))
writer.write(line)
Thereafter, the file at `filename` will have line numbers in it.
"""
import io
if set(mode).intersection('wa+'):
raise ValueError('Only read-only file modes can be used')
# move existing file to backup, create new file with same permissions
# borrowed extensively from the fileinput module
backup_fn = self + (backup_extension or os.extsep + 'bak')
try:
os.unlink(backup_fn)
except os.error:
pass
os.rename(self, backup_fn)
readable = io.open(backup_fn, mode, buffering=buffering,
encoding=encoding, errors=errors, newline=newline)
try:
perm = os.fstat(readable.fileno()).st_mode
except OSError:
writable = open(self, 'w' + mode.replace('r', ''),
buffering=buffering, encoding=encoding, errors=errors,
newline=newline)
else:
os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
if hasattr(os, 'O_BINARY'):
os_mode |= os.O_BINARY
fd = os.open(self, os_mode, perm)
writable = io.open(fd, "w" + mode.replace('r', ''),
buffering=buffering, encoding=encoding, errors=errors,
newline=newline)
try:
if hasattr(os, 'chmod'):
os.chmod(self, perm)
except OSError:
pass
try:
yield readable, writable
except Exception:
# move backup back
readable.close()
writable.close()
try:
os.unlink(self)
except os.error:
pass
os.rename(backup_fn, self)
raise
else:
readable.close()
writable.close()
finally:
try:
os.unlink(backup_fn)
except os.error:
pass
@ClassProperty
@classmethod
def special(cls):
"""
        Return a SpecialResolver object suitable for referencing a
        directory appropriate to the platform for the given
        type of content.
        For example, to get a user config directory, invoke:
            dir = Path.special().user.config
        Uses the `appdirs
        <https://pypi.python.org/pypi/appdirs/1.4.0>`_ module to resolve
        the paths in a platform-friendly way.
        To create a config directory for 'My App', consider:
            dir = Path.special("My App").user.config.makedirs_p()
        If the ``appdirs`` module is not installed, invoking
        ``special`` will raise an ImportError.
"""
return functools.partial(SpecialResolver, cls)
class SpecialResolver(object):
class ResolverScope:
def __init__(self, paths, scope):
self.paths = paths
self.scope = scope
def __getattr__(self, class_):
return self.paths.get_dir(self.scope, class_)
def __init__(self, path_class, *args, **kwargs):
appdirs = importlib.import_module('appdirs')
# let appname default to None until
# https://github.com/ActiveState/appdirs/issues/55 is solved.
not args and kwargs.setdefault('appname', None)
vars(self).update(
path_class=path_class,
wrapper=appdirs.AppDirs(*args, **kwargs),
)
def __getattr__(self, scope):
return self.ResolverScope(self, scope)
def get_dir(self, scope, class_):
"""
Return the callable function from appdirs, but with the
result wrapped in self.path_class
"""
prop_name = '{scope}_{class_}_dir'.format(**locals())
value = getattr(self.wrapper, prop_name)
MultiPath = Multi.for_class(self.path_class)
return MultiPath.detect(value)
class Multi:
"""
    A mix-in for a Path which may contain multiple Paths separated by pathsep.
"""
@classmethod
def for_class(cls, path_cls):
name = 'Multi' + path_cls.__name__
if PY2:
name = str(name)
return type(name, (cls, path_cls), {})
@classmethod
def detect(cls, input):
if os.pathsep not in input:
cls = cls._next_class
return cls(input)
def __iter__(self):
return iter(map(self._next_class, self.split(os.pathsep)))
@ClassProperty
@classmethod
def _next_class(cls):
"""
Multi-subclasses should use the parent class
"""
return next(
class_
for class_ in cls.__mro__
if not issubclass(class_, Multi)
)
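# Illustrative note (not part of the original library): Multi.for_class builds
# a list-like Path subclass, and detect() falls back to the plain path class
# when no os.pathsep is present. A hypothetical session:
#
#     MultiPath = Multi.for_class(Path)
#     for p in MultiPath.detect('/usr/bin' + os.pathsep + '/bin'):
#         print(p)   # yields Path('/usr/bin'), then Path('/bin')
#
#     MultiPath.detect('/usr/bin')   # no pathsep -> returns a plain Path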
class tempdir(Path):
"""
    A temporary directory created via :func:`tempfile.mkdtemp` (constructed
    with the same parameters), usable as a context manager.
Example:
with tempdir() as d:
# do stuff with the Path object "d"
# here the directory is deleted automatically
.. seealso:: :func:`tempfile.mkdtemp`
"""
@ClassProperty
@classmethod
def _next_class(cls):
return Path
def __new__(cls, *args, **kwargs):
dirname = tempfile.mkdtemp(*args, **kwargs)
return super(tempdir, cls).__new__(cls, dirname)
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not exc_value:
self.rmtree()
def _multi_permission_mask(mode):
"""
Support multiple, comma-separated Unix chmod symbolic modes.
>>> _multi_permission_mask('a=r,u+w')(0) == 0o644
True
"""
compose = lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs))
return functools.reduce(compose, map(_permission_mask, mode.split(',')))
def _permission_mask(mode):
"""
Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
suitable for applying to a mask to affect that change.
>>> mask = _permission_mask('ugo+rwx')
>>> mask(0o554) == 0o777
True
>>> _permission_mask('go-x')(0o777) == 0o766
True
>>> _permission_mask('o-x')(0o445) == 0o444
True
>>> _permission_mask('a+x')(0) == 0o111
True
>>> _permission_mask('a=rw')(0o057) == 0o666
True
>>> _permission_mask('u=x')(0o666) == 0o166
True
>>> _permission_mask('g=')(0o157) == 0o107
True
"""
# parse the symbolic mode
parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
if not parsed:
raise ValueError("Unrecognized symbolic mode", mode)
# generate a mask representing the specified permission
spec_map = dict(r=4, w=2, x=1)
specs = (spec_map[perm] for perm in parsed.group('what'))
spec = functools.reduce(operator.or_, specs, 0)
# now apply spec to each subject in who
shift_map = dict(u=6, g=3, o=0)
who = parsed.group('who').replace('a', 'ugo')
masks = (spec << shift_map[subj] for subj in who)
mask = functools.reduce(operator.or_, masks)
op = parsed.group('op')
# if op is -, invert the mask
if op == '-':
mask ^= 0o777
# if op is =, retain extant values for unreferenced subjects
if op == '=':
masks = (0o7 << shift_map[subj] for subj in who)
retain = functools.reduce(operator.or_, masks) ^ 0o777
op_map = {
'+': operator.or_,
'-': operator.and_,
'=': lambda mask, target: target & retain ^ mask,
}
return functools.partial(op_map[op], mask)
class CaseInsensitivePattern(text_type):
"""
A string with a ``'normcase'`` property, suitable for passing to
:meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
    :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitively.
For example, to get all files ending in .py, .Py, .pY, or .PY in the
current directory::
from path import Path, CaseInsensitivePattern as ci
Path('.').files(ci('*.py'))
"""
@property
def normcase(self):
return __import__('ntpath').normcase
########################
# Backward-compatibility
class path(Path):
def __new__(cls, *args, **kwargs):
msg = "path is deprecated. Use Path instead."
warnings.warn(msg, DeprecationWarning)
return Path.__new__(cls, *args, **kwargs)
__all__ += ['path']
########################
| LudditeLabs/query-reform | data/samples/Python/1.py | Python | apache-2.0 | 56,555 |
#!/usr/bin/env python3
"""
Given two Bibles all in one file (with books and verses in any order), with one
verse per line, with lines like this:
BOOK_chapter_verse{TAB}Text of verse goes here...
... print out which verses are present in the first Bible but missing in the
second, and vice-versa.
"""
import sys
import util
def set_of_verses(fn):
"""Return the set of verses found in the given filename."""
out = set()
with open(fn) as infile:
for line in infile:
line = line.strip()
verse, text = line.split("\t")
if verse in out:
util.dprint("WARNING duplicate verse {0} in {1}".format(
verse, fn))
out.add(verse)
return out
def main():
left = set_of_verses(sys.argv[1])
right = set_of_verses(sys.argv[2])
print("[left but not right]")
leftbutnotright = sorted(list(left - right))
for verse in leftbutnotright:
print(verse)
print("[right but not left]")
rightbutnotleft = sorted(list(right - left))
for verse in rightbutnotleft:
print(verse)
if __name__ == "__main__": main()
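# Illustrative usage (filenames below are placeholders, not from the original):
#
#     python3 unify_bibles.py bible_left.txt bible_right.txt
#
# Each input file must contain one "BOOK_chapter_verse<TAB>text of verse" line
# per verse, as described in the module docstring.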
| alexrudnick/terere | bibletools/unify_bibles.py | Python | gpl-3.0 | 1,141 |
###############################################################################
# Copyright 2006 to the present, Orbitz Worldwide, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''
Created on Jun 6, 2011
@author: cbrinley
'''
import re
from romeo.directives import Directive,DirectiveException
class PreMergeLists(Directive):
    '''This directive merges multiple lists into one list.
    Recommended to be used with your parser's anchor
    or tag system. Here is a YAML example:
SERVER:
SERVICES: ${ROMEO.merge_lists *list1,*list2,*complex_type}
MYLIST: &list1
- item1
- item2
ANOTHER: &list2
- i3
- i4
output without this directive:
SERVER:
SERVICES: [ [item1,item2],[i3,i4],*complex_type]
output with this directive:
SERVER:
    SERVICES: [item1,item2,i3,i4,{complex:type}]
'''
name = "merge_lists"
modes = ['pre']
repl_pattern = '["$<ROMEO.merge_lists %s>", %s]'
@classmethod
def init_kwargs(cls, preprocessor):
state = preprocessor.get_group_state("merge_lists")
return {"group_state":state}
def is_valid(self):
'''for this directive we have no extended validation.
we leave it up to the outer structured data parser
to determine if our arguments are valid.
'''
return
def apply(self):
'''pre side of this directive basically just
sets up some markers for the post side
of the directive. that's where the heavy
lifting is at.
#1 if we got this far we set state to true
so post side of directive can quickly
detect if we should continue processing.
'''
group_state = self.kwargs['group_state']
group_state[self.filename] = True #1
out = []
self.data.seek(0)
for line in self.get_lines():
m = self.used_pattern.search(line)
if not m:
out.append(line)
continue
            extracted = self.extract_args(line[m.start():m.end()])
            args = ", ".join(extracted)
            dirargs = ",".join(extracted)
            repl = self.repl_pattern % (dirargs, args)
post_style_line = self.used_pattern.sub(repl,line,1)
out.append(post_style_line)
return "".join(out)
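# Illustrative sketch of the pre-phase rewrite (derived from repl_pattern
# above, not an official example): a raw source line such as
#
#     SERVICES: ${ROMEO.merge_lists *list1,*list2}
#
# is rewritten to
#
#     SERVICES: ["$<ROMEO.merge_lists *list1,*list2>", *list1, *list2]
#
# so the structured-data parser expands the anchors, and the post-phase
# directive can later find its marker in element 0 of the parsed list.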
class PostMergeLists(Directive):
    '''Main logic is in merge_lists(). See its doc for details.
    See PreMergeLists for general directive notes.
'''
name = "merge_lists"
modes = ['post']
repl_pattern = '["$<ROMEO.merge_lists %s>", %s]'
@classmethod
def init_kwargs(cls, preprocessor):
state = preprocessor.get_group_state('merge_lists')
marker = re.compile("\$\<ROMEO\.%s.*\>" % cls.name )
return {'group_state':state,
'marker': marker,
}
def is_used(self):
        '''Traversing the whole DOM could be quite expensive
        depending on how many tags and imports were used
        in the raw source file. Our "pre" cousin has given us
        a way to check on the cheap.
'''
group_state = self.kwargs['group_state']
return self.filename in group_state
def is_valid(self):
'''for this directive we have no extended validation.
we leave it up to the outer structured data parser
to determine if our arguments are valid.
'''
return
def apply(self):
self.used_pattern = self.kwargs['marker']
td = type(self.data)
if td == list: self.try_list_iterate(self.data)
if td == dict: self.try_dict_iterate(self.data)
del self.kwargs['group_state'][self.filename]
return self.data
def try_dict_iterate(self,data):
for v in data.values():
if type(v) == list:
self.try_list_iterate(v)
if type(v) == dict:
self.try_dict_iterate(v)
def try_list_iterate(self,data):
#check list value 0
#if its our guy merge it pluss next N lists
#remove first N+1 lists
#insert merged list as ord 0
#iterate over list
head = data[0]
if type(head) == str and self.used_pattern.match(head):
self.merge_lists(data)
for i in data:
if type(i) == list:
self.try_list_iterate(i)
if type(i) == dict:
self.try_dict_iterate(i)
def merge_lists(self,data):
'''#1 figure out how many lists we should merge
this is == to number of args passed to directive.
#2 our total list len (of lists) must be at least as
long as the number of args to our directive.
#3 skip the directive string and get the arguments
to the directive which should be the next <minlen>
items in our parent list.
#4 in case not all the items in our parent were
themselves lists. make em lists.
#5 flatten out this list of lists [[1],[2]] -> [1,2]
#6 reverse our list so we have [2,1] and push these
values onto the front of our list.
'''
err0 = 'merge_lists failed. '
err0 += 'there are not enough input lists. '
err0 += 'expected %s found %s.'
head = data[0]
args = self.extract_args(head) #1
minlen = len(args) + 1
actlen = len(data)
if actlen < minlen: #2
msg = err0 % (minlen,actlen)
raise DirectiveException(msg)
to_merge = data[1:minlen] #3
        for i in range(len(to_merge)): #4
            if type(to_merge[i]) != list:
                to_merge[i] = [to_merge[i]]
out = []
for l in to_merge: #5
for i in l:
out.append(i)
del data[:minlen]
out.reverse() #6
for i in out:
data.insert(0,i)
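# Illustrative sketch of what merge_lists() does to a parsed list (an
# assumption based on the code above, not from the original docs):
#
#     data = ['$<ROMEO.merge_lists *a,*b>', [1, 2], [3, 4], 'tail']
#     # after merge_lists(data):
#     data == [1, 2, 3, 4, 'tail']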
| OrbitzWorldwide/droned | romeo/lib/romeo/directives/merge_lists.py | Python | apache-2.0 | 6,736 |
# -*- encoding: utf-8 -*-
"""Implements different locators for UI"""
from selenium.webdriver.common.by import By
from .model import LocatorDict
common_locators = LocatorDict({
# common locators
"body": (By.CSS_SELECTOR, "body"),
# Notifications
"notif.error": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-error')]"),
"notif.warning": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-warning')]"),
"notif.success": (
By.XPATH, "//div[contains(@class, 'jnotify-notification-success')]"),
"notif.close": (
By.XPATH, "//a[@class='jnotify-close']"),
"alert.success": (
By.XPATH, "//div[contains(@class, 'alert-success')]"),
"alert.error": (
By.XPATH, "//div[contains(@class, 'alert-danger')]"),
"alert.success_sub_form": (
By.XPATH, "//div[contains(@bst-alert, 'success')]"),
"alert.error_sub_form": (
By.XPATH, "//div[contains(@bst-alert, 'danger')]"),
"alert.close": (By.XPATH, "//button[@class='close ng-scope']"),
"selected_entity": (
By.XPATH,
("//div[@class='ms-selection']/ul[@class='ms-list']"
"/li[@class='ms-elem-selection ms-selected']")),
"select_filtered_entity": (
By.XPATH, "//table//a/span[contains(@data-original-title, '%s')]"),
"checked_entity": (
By.XPATH, "//input[@checked='checked']/parent::label"),
"entity_select": (
By.XPATH,
("//div[@class='ms-selectable']//"
"li[not(contains(@style, 'display: none'))]/span[contains(.,'%s')]")),
"entity_deselect": (
By.XPATH,
("//div[@class='ms-selection']//"
"li[not(contains(@style, 'display: none'))]/span[contains(.,'%s')]")),
"entity_checkbox": (
By.XPATH,
"//label[normalize-space(.)='%s']/input[@type='checkbox']"),
"entity_select_list": (
By.XPATH,
"//ul/li/div[normalize-space(.)='%s']"),
"entity_select_list_vmware": (
By.XPATH,
"//ul/li/div[contains(normalize-space(.),'%s')]"),
"select_list_search_box": (
By.XPATH, "//div[@id='select2-drop']//input"),
"name_haserror": (
By.XPATH,
("//label[@for='name']/../../"
"div[contains(@class,'has-error')]")),
"haserror": (
By.XPATH,
"//div[contains(@class,'has-error')]"),
"common_haserror": (
By.XPATH,
("//span[@class='help-block']/ul/"
"li[contains(@ng-repeat,'error.messages')]")),
"table_haserror": (
By.XPATH,
"//tr[contains(@class,'has-error')]/td/span"),
"common_invalid": (
By.XPATH,
"//input[@id='name' and contains(@class,'ng-invalid')]"),
"common_param_error": (
By.XPATH,
("//div[@id='parameters']/span[@class='help-block'"
"and string-length(text()) > 10]")),
"search": (By.ID, "search"),
"clear_search": (By.XPATH, "//a[@class='autocomplete-clear']"),
"search_no_results": (By.XPATH, "//div[text()='No entries found']"),
"auto_search": (
By.XPATH,
("//ul[contains(@class, 'ui-autocomplete') or "
"contains(@template-url, 'autocomplete')]/li/a[contains(., '%s')]")),
"search_button": (By.XPATH, "//button[contains(@type,'submit')]"),
"search_dropdown": (
By.XPATH,
("//button[contains(@class, 'dropdown-toggle')]"
"[@data-toggle='dropdown']")),
"cancel_form": (By.XPATH, "//a[text()='Cancel']"),
"submit": (By.NAME, "commit"),
"select_action_dropdown": (
By.XPATH,
"//td[descendant::*[normalize-space(.)='%s']]/"
"following-sibling::td/div/a[@data-toggle='dropdown']"),
"delete_button": (
By.XPATH,
"//a[contains(@data-confirm, '%s') and @data-method='delete']"),
"copy_name_input": (By.XPATH, "//input[@ng-model='copyName']"),
"copy_create_button": (By.XPATH, "//button[@ng-click='copy(copyName)']"),
"filter": (By.XPATH,
("//div[@id='ms-%s_ids']"
"//input[@class='ms-filter']")),
"parameter_tab": (By.XPATH, "//a[contains(., 'Parameters')]"),
"add_parameter": (
By.XPATH, "//a[contains(text(),'+ Add Parameter')]"),
"new_parameter_name": (
By.XPATH, "//input[@placeholder='Name' and not(@value)]"),
"parameter_value": (
By.XPATH,
("//table[contains(@id, 'parameters')]//tr"
"/td[input[contains(@id, 'name')][contains(@value, '%s')]]"
"/following-sibling::td//textarea")),
"new_parameter_value": (
By.XPATH, "//textarea[@placeholder='Value' and not(text())]"),
"parameter_remove": (
By.XPATH, "//tr/td/input[@value='%s']/following::td/a"),
"table_column_title": (By.XPATH, "//th[contains(., '%s')]/*"),
"table_cell_link": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//td[contains(normalize-space(.), '%s')]"
"/parent::tr"
"/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]/a"
),
"table_cell_value": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//td[contains(normalize-space(.), '%s')]"
"/parent::tr"
"/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]"
),
"table_column_values": (
By.XPATH,
"//table//td/parent::tr/td[count(//thead//tr/th[contains(., '%s')]"
"/preceding-sibling::*)+1]"
),
"table_select_all_checkbox": (
By.XPATH,
"//table[contains(@class, 'table')]"
"//input[@type='checkbox'and @ng-model='selection.allSelected']"
),
"application_logo": (
By.XPATH, "//img[contains(@alt, 'Header logo')]"),
"permission_denied": (
By.XPATH,
"//h1[contains(.,'Permission denied')]"
),
# Katello Common Locators
"confirm_remove": (
By.XPATH, "//button[@ng-click='ok()' or @ng-click='delete()']"),
"create": (By.XPATH, "//button[contains(@ng-click,'Save')]"),
"save": (
By.XPATH, ("//button[contains(@ng-click,'save')"
"and not(contains(@class,'ng-hide'))]")),
"close": (By.XPATH, "//button[@aria-label='Close']"),
"cancel": (
By.XPATH,
"//button[contains(@ng-click,'cancel') and "
"not(contains(@class,'ng-hide'))][contains(., 'Cancel')]"
),
"name": (By.ID, "name"),
"label": (By.ID, "label"),
"description": (By.ID, "description"),
"kt_select_action_dropdown": (
By.XPATH,
("//button[contains(@ng-click, 'toggleDropdown')]"
"[descendant::span[text()='Select Action']]")),
"select_action": (
By.XPATH,
"//li/a/span[@class='ng-scope' and contains(., '%s')]"),
"kt_search": (By.XPATH, "//input[@ng-model='table.searchTerm']"),
"kt_clear_search": (
By.XPATH, "//button[contains(@ng-click, 'searchCompleted = false')]"),
"kt_search_no_results": (
By.XPATH, "//table//span[@data-block='no-search-results-message']"),
"kt_search_button": (
By.XPATH,
"//button[@ng-click='table.search(table.searchTerm)']"),
"kt_table_search": (
By.XPATH, "//input[@ng-model='detailsTable.searchTerm']"),
"kt_table_search_button": (
By.XPATH,
"//button[@ng-click='detailsTable.search(detailsTable.searchTerm)']"),
"kt_table_cell_value": (
By.XPATH,
"//table[@bst-table='table']//td[contains(normalize-space(.), '%s')]"
"/parent::tr/td[count(//thead//tr/th[.='%s']/preceding-sibling::*)+1]"
),
# Katello common Product and Repo locators
"gpg_key": (By.ID, "gpg_key_id"),
"all_values": (By.XPATH,
("//div[contains(@class,'active')]//input[@type='checkbox'"
" and contains(@name, '%s')]")),
"all_values_selection": (
By.XPATH,
("//div[@class='ms-selection']//ul[@class='ms-list']/li"
"/span[contains(.,'%s')]/..")),
"usage_limit": (
By.XPATH,
"//input[contains(@ng-model, 'max')"
"and contains(@ng-model, 'hosts')]"),
"usage_limit_checkbox": (
By.XPATH,
"//input[contains(@ng-model, 'unlimited')"
"and contains(@ng-model, 'hosts')]"),
"invalid_limit": (
By.XPATH,
"//input[contains(@id, 'max') and contains(@class, 'ng-invalid')]"),
"modal_background": (
By.XPATH,
"//*[@class='modal-backdrop fade in']",
),
"select_repo": (By.XPATH, "//select[@ng-model='repository']"),
"table_per_page": (
By.XPATH, "//select[@ng-model='table.params.per_page']"),
# ace editor
"ace.input": (By.XPATH, "//label[contains(., 'Input') and"
" contains(@class, 'btn')]"),
"ace.diff": (By.XPATH, "//label[contains(., 'Diff') and"
" contains(@class, 'btn')]"),
"ace.preview": (By.XPATH, "//label[contains(., 'Preview') and"
" contains(@class, 'btn')]"),
# 'Run Job' button that is accessible from Jobs and Hosts pages
"run_job": (By.XPATH, "//a[@data-id='aid_job_invocations_new']"),
# org environment
"org_environment_info": (
By.XPATH,
'//div[@bst-alert="info"]//span[contains(., '
'"Access to repositories is unrestricted in this organization.")]'),
})
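# Illustrative usage (a sketch, not part of the original module): each entry
# is a (strategy, value) pair, and values containing '%s' are parameterized
# before being handed to Selenium, e.g.:
#
#     strategy, value = common_locators['entity_select']
#     locator = (strategy, value % 'Default Organization')
#     driver.find_element(*locator)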
| sghai/robottelo | robottelo/ui/locators/common.py | Python | gpl-3.0 | 9,297 |
import urllib.request
import urllib.parse
import pprint
from .utils import transform_datetime
from .utils import flatten
from warnings import warn
import json as simplejson
_debug = 1
class ChimpyException(Exception):
pass
class ChimpyWarning(Warning):
pass
class Connection(object):
"""mailchimp api connection"""
output = "json"
version = '1.3'
def __init__(self, apikey=None, secure=False):
self._apikey = apikey
proto = 'http'
if secure:
proto = 'https'
api_host = 'api.mailchimp.com'
if '-' in apikey:
key, dc = apikey.split('-')
else:
dc = 'us1'
api_host = dc + '.' + api_host
self.url = '%s://%s/%s/' % (proto, api_host, self.version)
self.opener = urllib.request.build_opener()
self.opener.addheaders = [('Content-Type', 'application/x-www-form-urlencoded')]
def _rpc(self, method, **params):
"""make an rpc call to the server"""
params = urllib.parse.urlencode(params, doseq=True).encode('utf8')
if _debug > 1:
print (__name__, "making request with parameters")
pprint.pprint(params)
print (__name__, "encoded parameters:", params)
response = self.opener.open("%s?method=%s" %(self.url, method), params)
data = response.read().decode('utf8')
response.close()
if _debug > 1:
print (__name__, "rpc call received", data)
result = simplejson.loads(data)
try:
if 'error' in result:
raise ChimpyException("%s:\n%s" % (result['error'], params))
except TypeError:
# thrown when results is not iterable (eg bool)
pass
return result
def _api_call(self, method, **params):
"""make an api call"""
# flatten dict variables
        params = dict([(str(k), v.encode('utf-8') if isinstance(v, str) else v) for k, v in flatten(params).items()])
params['output'] = self.output
params['apikey'] = self._apikey
return self._rpc(method=method, **params)
def ping(self):
return self._api_call(method='ping')
def lists(self, limit=25):
all_lists = []
start = 0
has_more = True
while has_more:
response = self._api_call(method='lists', start=start, limit=limit)
all_lists += response['data']
has_more = int(response['total']) > len(all_lists)
start += 1
return all_lists
def list_batch_subscribe(self,
id,
batch,
double_optin=True,
update_existing=False,
replace_interests=False):
return self._api_call(method='listBatchSubscribe',
id=id,
batch=batch,
double_optin=double_optin,
update_existing=update_existing,
replace_interests=replace_interests)
def list_batch_unsubscribe(self,
id,
emails,
delete_member=False,
send_goodbye=True,
send_notify=False):
return self._api_call(method='listBatchUnsubscribe',
id=id,
emails=emails,
delete_member=delete_member,
send_goodbye=send_goodbye,
send_notify=send_notify)
def list_subscribe(self,
id,
email_address,
merge_vars,
email_type='text',
double_optin=True,
update_existing=False,
replace_interests=True,
send_welcome=False):
return self._api_call(method='listSubscribe',
id=id,
email_address=email_address,
merge_vars=merge_vars,
email_type=email_type,
double_optin=double_optin,
update_existing=update_existing,
replace_interests=replace_interests,
send_welcome=send_welcome)
def list_unsubscribe(self,
id,
email_address,
delete_member=False,
send_goodbye=True,
send_notify=True):
return self._api_call(method='listUnsubscribe',
id=id,
email_address=email_address,
delete_member=delete_member,
send_goodbye=send_goodbye,
send_notify=send_notify)
def list_update_member(self,
id,
email_address,
merge_vars,
email_type='',
replace_interests=True):
return self._api_call(method='listUpdateMember',
id=id,
email_address=email_address,
merge_vars=merge_vars,
email_type=email_type,
replace_interests=replace_interests)
def list_member_info(self, id, email_address):
        if isinstance(email_address, str):
first = True
email_address = [email_address]
else:
first = False
result = self._api_call(method='listMemberInfo',
id=id,
email_address=email_address)
if first:
return result['data'][0]
return result
def list_members(self, id, status='subscribed', since=None, start=0, limit=100):
return self._api_call(method='listMembers', id=id, status=status, since=since, start=start, limit=limit)
def list_interest_groupings_add(self, id, name, type, groups):
"""
Add a new Interest Grouping - if interest groups for the List are not yet
enabled, adding the first grouping will automatically turn them on.
http://apidocs.mailchimp.com/api/1.3/listinterestgroupingadd.func.php
"""
return self._api_call(method='listInterestGroupingAdd', id=id, name=name, type=type, groups=groups)
def list_interest_groupings_del(self, grouping_id):
"""
Delete an existing Interest Grouping - this will permanently delete all
contained interest groups and will remove those selections from all list
members
http://apidocs.mailchimp.com/api/1.3/listinterestgroupingdel.func.php
"""
return self._api_call(method='listInterestGroupingDel', grouping_id=grouping_id)
def list_interest_groupings(self, id):
return self._api_call(method='listInterestGroupings', id=id)
def list_interest_groups(self, id, grouping_id, full=False):
groupings = self.list_interest_groupings(id)
grouping = None
for g in groupings:
if int(g['id']) == grouping_id:
grouping = g
break
if not grouping:
return []
if not full:
return [group['name'] for group in grouping['groups']]
return grouping
def list_interest_group_add(self, id, name, grouping_id):
return self._api_call(method='listInterestGroupAdd', id=id, group_name=name, grouping_id=grouping_id)
def list_interest_group_del(self, id, name, grouping_id):
return self._api_call(method='listInterestGroupDel', id=id, group_name=name, grouping_id=grouping_id)
def list_interest_group_update(self, id, old_name, new_name, grouping_id):
return self._api_call(method='listInterestGroupUpdate', id=id, old_name=old_name, new_name=new_name, grouping_id=grouping_id)
def list_merge_vars(self, id):
return self._api_call(method='listMergeVars', id=id)
def list_merge_var_add(self, id, tag, name, req=False):
tag = tag.upper()
return self._api_call(method='listMergeVarAdd', id=id, tag=tag, name=name, req=req)
def list_merge_var_del(self, id, tag):
return self._api_call(method='listMergeVarDel', id=id, tag=tag)
def list_webhooks(self, id):
return self._api_call(method='listWebhooks', id=id)
# public static listWebhookAdd(string apikey, string id, string url, array actions, array sources)
def list_webhook_add(self, id, url, actions, sources):
return self._api_call(method='listWebhookAdd', id=id, url=url, actions=actions, sources=sources)
def list_webhook_del(self, id, url):
return self._api_call(method='listWebhookDel', id=id, url=url)
def campaign_content(self, cid, archive_version=True):
"""Get the content (both html and text) for a campaign, exactly as it would appear in the campaign archive
http://apidocs.mailchimp.com/api/1.3/campaigncontent.func.php
"""
return self._api_call(method='campaignContent', cid=cid, for_archive=archive_version)
def campaign_create(self, campaign_type, options, content, **kwargs):
"""Create a new draft campaign to send.
http://www.mailchimp.com/api/1.3/campaigncreate.func.php
Optional parameters: segment_opts, type_opts
"""
# enforce the 100 char limit (urlencoded!!!)
        title = options.get('title', options['subject'])
        titlelen = len(urllib.parse.quote_plus(title))
        if titlelen > 99:
            title = title[:-(titlelen - 96)] + '...'
            warn("cropped campaign title to fit the 100 character limit, new title: '%s'" % title, ChimpyWarning)
        subject = options['subject']
        subjlen = len(urllib.parse.quote_plus(subject))
        if subjlen > 99:
            subject = subject[:-(subjlen - 96)] + '...'
            warn("cropped campaign subject to fit the 100 character limit, new subject: '%s'" % subject, ChimpyWarning)
options['title'] = title
options['subject'] = subject
return self._api_call(method='campaignCreate', type=campaign_type, options=options, content=content, **kwargs)
def campaign_delete(self, cid):
"""Delete a campaign.
http://www.mailchimp.com/api/1.3/campaigndelete.func.php
"""
return self._api_call(method='campaignDelete', cid=cid)
def campaign_pause(self, cid):
"""Pause a RSS campaign from sending.
http://apidocs.mailchimp.com/api/1.3/campaignpause.func.php
"""
return self._api_call(method='campaignPause', cid=cid)
def campaign_replicate(self, cid):
"""Replicate a campaign.
http://apidocs.mailchimp.com/api/1.3/campaignreplicate.func.php
"""
return self._api_call(method='campaignReplicate', cid=cid)
def campaign_resume(self, cid):
"""Resume sending a RSS campaign.
http://apidocs.mailchimp.com/api/1.3/campaignresume.func.php
"""
return self._api_call(method='campaignResume', cid=cid)
def campaign_schedule(self, cid, schedule_time, schedule_time_b=None):
"""Schedule a campaign to be sent in the future.
http://apidocs.mailchimp.com/api/1.3/campaignschedule.func.php
"""
schedule_time = transform_datetime(schedule_time)
if schedule_time_b:
schedule_time_b = transform_datetime(schedule_time_b)
return self._api_call(method='campaignSchedule', cid=cid, schedule_time=schedule_time, schedule_time_b=schedule_time_b)
def campaign_send_now(self, cid):
"""Send a given campaign immediately.
http://apidocs.mailchimp.com/api/1.3/campaignsendnow.func.php
"""
return self._api_call(method='campaignSendNow', cid=cid)
def campaign_send_test(self, cid, test_emails, **kwargs):
"""Send a test of this campaign to the provided email address.
Optional parameter: send_type
http://apidocs.mailchimp.com/api/1.3/campaignsendtest.func.php
"""
        if isinstance(test_emails, str):
test_emails = [test_emails]
return self._api_call(method='campaignSendTest', cid=cid, test_emails=test_emails, **kwargs)
def templates(self, user=True, gallery=False, base=False):
"""
Retrieve various templates available in the system, allowing something
similar to our template gallery to be created.
http://apidocs.mailchimp.com/api/1.3/templates.func.php
"""
return self._api_call(method='templates', user=user, gallery=gallery, base=base)
def template_info(self, template_id, template_type='user'):
"""
Pull details for a specific template to help support editing
http://apidocs.mailchimp.com/api/1.3/templateinfo.func.php
"""
        return self._api_call(method='templateInfo', tid=template_id, type=template_type)
def campaign_templates(self):
return self.templates()['user']
def campaign_unschedule(self, cid):
"""Unschedule a campaign that is scheduled to be sent in the future """
return self._api_call(method='campaignUnschedule', cid=cid)
def campaign_update(self, cid, name, value):
"""Update just about any setting for a campaign that has not been sent.
http://apidocs.mailchimp.com/api/1.3/campaignupdate.func.php
"""
return self._api_call(method='campaignUpdate', cid=cid, name=name, value=value)
def campaigns(self, filter_id='', filter_folder=None, filter_fromname='', filter_fromemail='',
filter_title='', filter_subject='', filter_sendtimestart=None, filter_sendtimeend=None,
filter_exact=False, start=0, limit=50):
"""Get the list of campaigns and their details matching the specified filters.
Timestamps should be passed as datetime objects.
http://apidocs.mailchimp.com/api/1.3/campaigns.func.php
"""
filter_sendtimestart = transform_datetime(filter_sendtimestart)
filter_sendtimeend = transform_datetime(filter_sendtimeend)
return self._api_call(method='campaigns',
filter_id=filter_id, filter_folder=filter_folder, filter_fromname=filter_fromname,
filter_fromemail=filter_fromemail, filter_title=filter_title, filter_subject=filter_subject,
filter_sendtimestart=filter_sendtimestart, filter_sendtimeend=filter_sendtimeend,
filter_exact=filter_exact, start=start, limit=limit)
def campaign_segment_test(self, list_id, options):
return self._api_call(method='campaignSegmentTest', list_id=list_id, options=options)
def folder_add(self, name, folder_type='campaign'):
"""
Add a new folder to file campaigns or autoresponders in
http://apidocs.mailchimp.com/api/1.3/folderadd.func.php
"""
return self._api_call('folderAdd', name=name, type=folder_type)
def folder_del(self, folder_id, folder_type='campaign'):
"""
Delete a campaign or autoresponder folder.
http://apidocs.mailchimp.com/api/1.3/folderdel.func.php
"""
return self._api_call('folderDel', fid=folder_id, type=folder_type)
def folder_update(self, folder_id, name, folder_type='campaign'):
"""
Update the name of a folder for campaigns or autoresponders
http://apidocs.mailchimp.com/api/1.3/folderupdate.func.php
"""
return self._api_call('folderUpdate', fid=folder_id, name=name, type=folder_type)
def folders(self):
"""List all the folders for a user account.
http://apidocs.mailchimp.com/api/1.3/folders.func.php
"""
return self._api_call(method='folders')
    # backwards compat for v1.2
campaign_folders = folders
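# Illustrative usage (the API key is a placeholder; a minimal sketch, assuming
# a valid MailChimp v1.3 account):
#
#     conn = Connection(apikey='0123456789abcdef-us1')
#     conn.ping()                  # expected to return "Everything's Chimpy!"
#     for lst in conn.lists():
#         print(lst['id'], lst['name'])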
| jorjun/django-mailchimp | mailchimp/chimpy/chimpy.py | Python | bsd-3-clause | 16,486 |
#######################################################################
# Copyright 2012 Mark Wolf
#
# This file is part of OrgWolf.
#
# OrgWolf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
import datetime as dt
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
import pytz
from orgwolf import settings
@python_2_unicode_compatible
class Message(models.Model):
"""Class for all messages that get passed around, either incoming or
outgoing. Most of the functionality is actually implemented in the
self.handler attribute that is created on __init__() from the
self.handler_path field.
"""
subject = models.TextField()
sender = models.TextField(blank=True)
recipient = models.TextField(blank=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
unread = models.BooleanField(default=True)
handler_path = models.CharField(max_length=100, blank=True)
in_inbox = models.BooleanField(default=True)
rcvd_date = models.DateTimeField(default=now)
message_text = models.TextField(blank=True)
spawned_nodes = models.ManyToManyField('gtd.Node', blank=True)
source_node = models.OneToOneField('gtd.Node', null=True, blank=True,
related_name='deferred_message',
on_delete=models.CASCADE)
def __str__(self):
return self.subject
| m3wolf/orgwolf | wolfmail/models.py | Python | gpl-3.0 | 2,137 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def traverse(matrix, size, point, step, path = []):
x, y = point
m, n = size
dx, dy = step
res = []
# Clone path. Faster than copy.copy()
path = path[:]
path.append(point)
# Clone matrix. Faster than copy.deepcopy()
matrix = [row[:] for row in matrix]
matrix[y][x] = len(path)
# Test if all points have been visited.
if matrix[y][x] == m * n:
return [(0, path)]
# All path we can take, ignoring the boundaries of the matrix
# and the values of the points.
points = set([
(x - dx, y - dy),
(x - dy, y - dx),
(x + dx, y - dy),
(x + dy, y - dx),
(x - dx, y + dy),
(x - dy, y + dx),
(x + dx, y + dy),
(x + dy, y + dx)
])
# Remove points that lie beyond the boundaries of the matrix.
points = [(x, y) for x, y in points if 0 <= x < m and 0 <= y < n]
# Remove points that have already been visited.
points = [(x, y) for x, y in points if matrix[y][x] == 0]
# Check if there are still points available to visit.
if not points:
# Yep, we're fucked now.
return [(m * n - matrix[y][x], path)]
# Refine the points we should visit.
# Get the points with the shortest distance to the boundaries where
# distance is the distance to the m boundaries plus the
# distance to the n boundaries:
#
# d = min(x, m - x - 1) + min(y, n - y - 1)
distances = [min(x, m - x - 1) + min(y, n - y - 1) for x, y in points]
minimum = min(distances)
points = [points[i] for i, d in enumerate(distances) if d == minimum]
# Refine the points we should visit for a final time.
# We should only visit the points with
# the shortest distance to the boundaries where distance
# is either the distance to the x boundaries or the y boundaries:
#
# d = min(x, m - x - 1, y, n - y - 1)
distances = [min(x, m - x - 1, y, n - y - 1) for x, y in points]
minimum = min(distances)
points = [points[i] for i, d in enumerate(distances) if d == minimum]
# Try the points that are available and collect the results.
for p in points:
res += traverse(matrix, size, p, step, path)
return res
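# Illustrative example (not part of the original): solving a 3x3 board with
# knight-like (1, 2) steps from the top-left corner.
#
#     matrix = [[0] * 3 for _ in range(3)]
#     results = traverse(matrix, (3, 3), (0, 0), (1, 2))
#     # each result is a tuple (unvisited_count, path); unvisited_count == 0
#     # means every point on the board was reached.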
if __name__ == "__main__":
    size, step = [list(map(int, input().split())) for _ in range(2)]
m, n = size
matrix = [[0] * m for _ in range(n)]
results = traverse(matrix, size, (0, 0), step)
results = sorted(results, key=lambda k: k[0])
if len(results) > 0:
unvisited, path = results[0]
print(len(path))
for x, y in path:
print("{0}{1}".format(chr(x + ord('a')), n - y))
| fabianm/olympiad | 2014-2015/round-1/C2-chess/chess.py | Python | apache-2.0 | 3,035 |
def parse(url):
"""
Parse ZooKeeper URL.
:param url: The URL in the form of "zk://username:password@servers/path".
:return: Tuple (credential, servers, path).
credential: Credential for authentication with "digest" scheme. Optional and default to
None.
servers: Compatible with Kazoo's 'hosts' argument.
path: Optional and default to '/'.
NOTE: This method doesn't validate the values in the returned tuple.
"""
  if not url.startswith("zk://"):
    raise ValueError("Expecting 'zk://' at the beginning of the URL")
  # NOTE: the original used string.lstrip(url, "zk://"), which strips a
  # *character set* and would also eat leading 'z'/'k'/':'/'/' characters
  # from the server list; slicing off the prefix is the correct behavior.
  url = url[len("zk://"):]
  try:
    servers, path = url.split('/', 1)
  except ValueError:
    servers = url
    path = ''
path = '/' + path
try:
    credential, servers = servers.split('@', 1)
except ValueError:
credential = None
return credential, servers, path
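# Illustrative examples (derived from the docstring above, not exhaustive):
#
#     parse("zk://host1:2181,host2:2181/mysos")
#     # -> (None, 'host1:2181,host2:2181', '/mysos')
#     parse("zk://user:pass@host1:2181")
#     # -> ('user:pass', 'host1:2181', '/')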
| xujyan/cotton | mysos/common/zookeeper.py | Python | apache-2.0 | 943 |
import logging
LOGGER = logging.getLogger(__name__)
from itertools import chain
import uuid
class Counter(object):
"""
"""
_redcon = None
_key = None
def __add__(self, other):
1 + other # sanity check
return self._redcon._execute('incr', self._key, other)
def __sub__(self, other):
1 - other # sanity check
return self._redcon._execute('decr', self._key, other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__sub__(other)
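# Illustrative sketch (assumes _redcon and _key are wired up by an owning
# Redcon connection, which this module does not show; the mapping of
# _execute('incr', ...) to a Redis INCR-style command is also an assumption):
#
#     hits = Counter()    # hits._key -> e.g. 'page:hits'
#     hits + 1            # increments 'page:hits' by 1, returns the new value
#     hits - 2            # decrements 'page:hits' by 2, returns the new value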
class Set(set):
"""
SADD - add(), update(), __ior__()
SCARD - len(s)
SDIFF - difference()
SDIFFSTORE -
SINTER - isdisjoint(), intersection(), __and__(), __rand__()
SINTERSTORE -
SISMEMBER - x in s / x not in s
SMEMBERS - __eq__(), __gt__(), __ge__(), __lt__(), __le__(), __ne__(), __iter__()
issubset(), issuperset()
SMOVE -
SPOP -
SRANDMEMBER -
SREM -
SUNION - union(), __or__(), __ror__()
SUNIONSTORE -
SSCAN -
"""
#:
key = None
_redcon = None
#
# Private methods
#
@property
def _all(self):
return self._exec('smembers')
def _exec(self, cmd, *args, **kwargs):
"""
Utility method for executing redis commands for this :class:`.Set` instance.
:param str cmd: redis command to execute
:param list args: args to pass to executed redis command
:param dict kwargs: kwargs to pass to executed redis command
"""
assert self.key is not None
return self._redcon._execute(cmd, self.key, *args, **kwargs)
def _expiring_set_commands(self, key, iter_obj):
"""
Builds the pipeline commands for creating an expiring set.
:param str key: key name for the expiring set
:param obj: object to build values from
:type obj: iterable
"""
commands = list()
key_and_values = [key]
for value in sorted(list(iter_obj)):
key_and_values.append(value)
commands.append(('sadd', tuple(key_and_values)))
commands.append(('expire', (key, 5)))
return commands
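    # Illustrative result (an assumption based on the code above): for key
    # 'tmp' and iterable {2, 1}, the returned pipeline commands would be
    #     [('sadd', ('tmp', 1, 2)), ('expire', ('tmp', 5))]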
def _separate_iterables(self, *items):
"""
Given a list of objects, separate the Redcon sets from other iterables.
:param list items: list of iterable objects
:rtype tuple: (redcon_set_keys, redcon_sets, non_redcon_iters)
"""
redcon_sets = list()
non_redcon_iters = list()
for item in items:
if isinstance(item, Set):
redcon_sets.append(item)
else:
# using `iter` here ensures that the method will bomb out before
# any requests to redis are made in the event some of the
# arguments are not iterable
non_redcon_iters.append(iter(item))
keys = [redcon_set.key for redcon_set in redcon_sets]
return (keys, redcon_sets, non_redcon_iters)
def _setify_combine_execute(self, cmd, *iters):
"""
Given a list of :class:`.Set`s and other builtin iterables, collect all
the :class:`.Set` keys, add the non-:class:`.Set` iterables to redis
as expiring sets, and perform the supplied command on all passed objects
utilizing a pipeline. If all supplied objects are Redcon :class:`.Set`s
no pipeline is used. The keys are collected and the supplied command
is executed on the collected keys.
:param str cmd: command to execute against all supplied objects
:param list iters: list of iterable objects to use with the given command
:rtype set: set result from the executed command
"""
redcon_keys, _, non_redcon_iters = self._separate_iterables(*iters)
if len(non_redcon_iters) > 0:
# first add all non_redcon objects as expiring sets in redis
# then call *cmd* on everything
commands = list()
all_keys = [self.key]
for obj in iters:
temp_key = str(uuid.uuid4())
all_keys.append(temp_key)
commands.extend(self._expiring_set_commands(temp_key, obj))
commands.append((cmd, tuple(all_keys)))
pipeline_results = self._redcon._pipeline(*commands)
return pipeline_results[-1]
# no non-Redcon objects
cmd_result = self._exec(cmd, *redcon_keys)
return cmd_result
#
# Public methods
#
def add(self, elem):
"""
Add element :attr:`elem` to the set.
:param elem: element to add to the set
"""
# SADD
self._exec('sadd', elem)
def clear(self):
"""
Remove all elements from the set.
"""
raise NotImplementedError()
def copy(self):
"""
Return a new set with a shallow copy of *s*.
"""
raise NotImplementedError()
def difference(self, *other):
"""
**set - other - ...**
Return a new set with elements in the set that are not in the others.
"""
# __sub__
# __rsub__
# SDIFF
return self._setify_combine_execute('sdiff', *other)
def difference_update(self, *other):
"""
**set -= other | ...**
Update the set, keeping only elements found in either set, but not in both.
"""
# __isub__
raise NotImplementedError()
def discard(self, elem):
"""
Remove element :attr:`elem` from the set if it is present.
"""
raise NotImplementedError()
def intersection(self, *other):
"""
**set & other & ...**
Return a new set with elements common to the set and all others.
"""
# __and__
# __rand__
# SINTER
return self._setify_combine_execute('sinter', *other)
def intersection_update(self, *other):
"""
**set &= other & ...**
Update the set, keeping only elements found in it and all others.
"""
# __iand__
raise NotImplementedError()
def isdisjoint(self, other):
"""
Return `True` if the set has no elements in common with :attr:`other`. Sets
are disjoint if and only if their intersection is the empty set.
"""
# SINTER
if isinstance(other, Set):
inter_result = self._exec('sinter', self.key, other.key)
return len(inter_result) == 0
# we have a builtin object we need to first add, then compare
temp_key = str(uuid.uuid4())
commands = list()
commands.extend(self._expiring_set_commands(temp_key, other))
commands.append(('sinter', (self.key, temp_key)))
res = self._redcon._pipeline(*commands)
return len(res[-1]) == 0
def issubset(self, other):
"""
**set <= other**
Test whether every element in the set is in :attr:`other`.
**set < other**
Test whether the set is a proper subset of :attr:`other`, that is,
``set <= other and set != other``.
"""
# __le__
# __lt__
# SMEMBERS
if isinstance(other, Set):
# I'm not sure why Set.__iter__ isn't getting called automagically
other = iter(other)
return self._all.issubset(other)
def issuperset(self, other):
"""
**set >= other**
Test whether every element in :attr:`other` is in the set.
**set > other**
Test whether the set is a proper superset of :attr:`other`, that is,
``set >= other and set != other``.
"""
# __ge__
# __gt__
# SMEMBERS
if isinstance(other, Set):
# I'm not sure why Set.__iter__ isn't getting called automagically
other = iter(other)
return self._all.issuperset(other)
def pop(self):
"""
Remove and return an arbitrary element from the set. Raises :class:`KeyError`
if the set is empty.
"""
raise NotImplementedError()
def remove(self, elem):
"""
Remove element :attr:`elem` from the set. Raises :class:`KeyError` if :attr:`elem`
is not contained in the set.
"""
raise NotImplementedError()
def symmetric_difference(self, other):
"""
**set ^ other**
Return a new set with elements in either the set or :attr:`other` but not both.
"""
# __xor__
# __rxor__
raise NotImplementedError()
def symmetric_difference_update(self, other):
"""
**set ^= other**
Update the set, keeping only elements found in either set, but not in both.
"""
# __ixor__
raise NotImplementedError()
def union(self, *other):
"""
**set | other | ...**
Return a new set with elements from the set and all others.
:param other: iterables of items to perform the union with
"""
# __or__
# __ror__
keys, _, non_redcon_iters = self._separate_iterables(*other)
union_result = self._exec('sunion', self.key, *keys)
if len(non_redcon_iters) > 0:
union_result = union_result.union(*non_redcon_iters)
return union_result
def update(self, *other):
"""
**set |= other | ...**
Update the set, adding elements found in it and all others.
:param other: iterables of items to add to the set
"""
# __ior__
args = list(chain.from_iterable(other))
self._exec('sadd', *args)
#
# Container methods
#
def __contains__(self, item):
"""
Test *x* for membership in *s*.
Test *x* for non-membership in *s*.
"""
# SISMEMBER
return self._exec('sismember', item)
def __iter__(self):
# SMEMBERS
return iter(self._all)
def __len__(self):
"""
Return the cardinality of set *s*.
"""
# SCARD
return self._exec('scard')
#
# Comparison methods
#
def __eq__(self, other):
# ==
# SMEMBERS
return self._all == other
def __ge__(self, other):
# >=
# Test whether every element in other is in the set
# SMEMBERS
return self._all >= other
def __gt__(self, other):
# >
# Test whether the set is a proper superset of other
# set >= other and set != other
# SMEMBERS
return self._all > other
def __le__(self, other):
# <=
# Test whether every element in the set is in other
# SMEMBERS
return self._all <= other
def __lt__(self, other):
# <
# Test whether the set is a proper subset of other
# set <= other and set != other
# SMEMBERS
return self._all < other
def __ne__(self, other):
# !=
# SMEMBERS
return self._all != other
#
# Arithmetic methods
#
def __and__(self, other):
# &
# SINTER
chain = SetIntersectionChain(self._redcon, self)
chain & other
return chain
def __or__(self, other):
# |
# SUNION
chain = SetUnionChain(self._redcon, self)
chain | other
return chain
def __sub__(self, other):
# -
# SDIFF
chain = SetDifferenceChain(self._redcon, self)
chain - other
return chain
def __xor__(self, other):
# ^
raise NotImplementedError()
#
# Reflected arithmetic methods
#
def __rand__(self, other):
# &
return self.__and__(other)
def __ror__(self, other):
# |
# SUNION
return self.__or__(other)
def __rsub__(self, other):
# -
# SDIFF
return self.__sub__(other)
def __rxor__(self, other):
# ^
return self.__xor__(other)
#
# Augmented assignment methods
#
def __iand__(self, other):
# &=
raise NotImplementedError()
def __ior__(self, other):
# |=
# SADD
if not isinstance(other, set):
raise TypeError(
'unsupported operand type(s) for |=: {!r} and {!r}'.format(
Set.__name__, other.__class__.__name__
)
)
self._exec('sadd', *other)
def __isub__(self, other):
# -=
raise NotImplementedError()
def __ixor__(self, other):
# ^=
raise NotImplementedError()
#
# Representation methods
#
def __repr__(self):
return '{}(key={!r})'.format(self.__class__.__name__, self.key)
class _SetChainCommand(object):
"""
Base class for chainable Set commands.
"""
def __init__(self, redcon, set_):
self._redcon = redcon
self._items = [set_]
def _exec(self, redcon_set_keys, redcon_chains, non_redcon_sets):
# `redcon_set_keys` is a tuple of all the keys belonging to any Redcon Sets in the
# chain starting with the Set key that started the chain.
# `redcon_chains` is a tuple of all the Redcon chains that were operated against
# `non_redcon_sets` is a tuple of all the non-Redcon sets in the chain
raise NotImplementedError()
def _prep_and_exec(self):
"""
Iterate over all objects operated on by this instance, separating
them by Redcon set keys (any instances of :class:`.Set`), other
chains (instances of :class:`._SetChainCommand`), and finally any
non-Redcon sets.
Once everything is sorted out :meth:`_exec` is executed with
the following parameters: ``redcon_set_keys``, ``redcon_chains``,
and ``non_redcon_sets``.
"""
redcon_set_keys = list() # keys of any Set objects
redcon_chains = list() # chains (_SetChainCommand instances)
non_redcon_sets = list() # any other objects that are not Sets
for item in self._items:
if isinstance(item, Set):
redcon_set_keys.append(item.key)
elif isinstance(item, _SetChainCommand):
redcon_chains.append(item)
else:
non_redcon_sets.append(item)
return self._exec(tuple(redcon_set_keys), tuple(redcon_chains), tuple(non_redcon_sets))
def __eq__(self, other):
# ==
result = self._prep_and_exec()
return result == other
def __ge__(self, other):
# >=
# Test whether every element in other is in the set
result = self._prep_and_exec()
return result >= other
def __gt__(self, other):
# >
# Test whether the set is a proper superset of other
# set >= other and set != other
result = self._prep_and_exec()
return result > other
def __le__(self, other):
# <=
# Test whether every element in the set is in other
result = self._prep_and_exec()
return result <= other
def __lt__(self, other):
# <
# Test whether the set is a proper subset of other
# set <= other and set != other
result = self._prep_and_exec()
return result < other
def __ne__(self, other):
# !=
result = self._prep_and_exec()
return result != other
def __iter__(self):
result = self._prep_and_exec()
return iter(result)
def __repr__(self):
keys = list()
non_redcons = list()
for item in self._items:
if isinstance(item, Set):
keys.append(repr(item.key))
else:
non_redcons.append(repr(item))
keys_str = ', '.join(keys)
non_redcons_str = ', '.join(non_redcons)
return '{}(keys=[{}], others=[{}])'.format(
self.__class__.__name__,
keys_str,
non_redcons_str
)
class SetUnionChain(_SetChainCommand):
"""
Returned when performing a bitwise or (``|``) on an instance of
:class:`.Set`.
"""
def _exec(self, redcon_set_keys, redcon_chains, non_redcon_sets):
union_result = self._redcon._execute(
'sunion',
redcon_set_keys[0],
*redcon_set_keys[1:]
)
for non_redcon in non_redcon_sets:
union_result = union_result.union(non_redcon)
return union_result
def __or__(self, other):
# |
# SUNION
if not isinstance(other, (set, SetUnionChain)):
raise TypeError(
'unsupported operand type(s) for |: {!r} and {!r}'.format(
Set.__name__, other.__class__.__name__
)
)
if isinstance(other, SetUnionChain):
self._items.extend(other._items)
else:
self._items.append(other)
return self
def __ror__(self, other):
# |
# SUNION
return self.__or__(other)
class SetIntersectionChain(_SetChainCommand):
"""
Returned when performing a bitwise and (``&``) on an instance of
:class:`.Set`.
"""
def _exec(self, redcon_set_keys, redcon_chains, non_redcon_sets):
if len(non_redcon_sets) > 0:
all_keys = list(redcon_set_keys)
commands = list()
for item in non_redcon_sets:
temp_key = str(uuid.uuid4())
all_keys.append(temp_key)
commands.extend(self._items[0]._expiring_set_commands(temp_key, item))
commands.append(('sinter', tuple(all_keys)))
inter_result = self._redcon._pipeline(*commands)
return inter_result[-1]
# no non-Redcon sets
inter_result = self._redcon._execute(
'sinter',
redcon_set_keys[0],
*redcon_set_keys[1:]
)
return inter_result
def __and__(self, other):
# &
# SINTER
if not isinstance(other, (set, SetIntersectionChain)):
raise TypeError(
'unsupported operand type(s) for &: {!r} and {!r}'.format(
Set.__name__, other.__class__.__name__
)
)
if isinstance(other, SetIntersectionChain):
self._items.extend(other._items)
else:
self._items.append(other)
return self
def __rand__(self, other):
# &
# SINTER
return self.__and__(other)
class SetDifferenceChain(_SetChainCommand):
"""
Returned when performing subtraction (``-``) on an instance of
:class:`Set`.
"""
def _exec(self, redcon_set_keys, redcon_chains, non_redcon_sets):
        if len(non_redcon_sets) > 0:
all_keys = list(redcon_set_keys)
commands = list()
for item in non_redcon_sets:
temp_key = str(uuid.uuid4())
all_keys.append(temp_key)
commands.extend(self._items[0]._expiring_set_commands(temp_key, item))
commands.append(('sdiff', tuple(all_keys)))
diff_result = self._redcon._pipeline(*commands)
return diff_result[-1]
diff_result = self._redcon._execute(
'sdiff',
redcon_set_keys[0],
*redcon_set_keys[1:]
)
return diff_result
def __sub__(self, other):
# -
# SDIFF
if not isinstance(other, (set, SetDifferenceChain)):
raise TypeError(
'unsupported operand type(s) for -: {!r} and {!r}'.format(
Set.__name__, other.__class__.__name__
)
)
self._items.append(other)
return self
def __rsub__(self, other):
# -
# SDIFF
return self.__sub__(other)
| atatsu/redcon | redcon/types.py | Python | mit | 20,021 |
# Map bundle interfaces to member ports
for path, value in get("RootCfg.InterfaceConfiguration(*).BundleMember.ID"):
print('Bundle-Ether', value['BundleID'], ':', path['InterfaceName'])
| cisco/xr-telemetry-m2m-web | src/assets/scriptlets/70_bundles-basic.py | Python | apache-2.0 | 191 |
# coding: utf-8
"""
This script tests the traceback of a decorated template.
"""
__author__ = "Caleb Burns"
__version__ = "0.7.4"
__status__ = "Development"
import os.path
import pprint
import sys
import traceback
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")))
import pdt
@pdt.template
def test_traceback():
this_will_cause_an_error()
class A:
@staticmethod
@pdt.template()
def test():
another_error()
def main(argv):
try:
test_traceback()
except:
traceback.print_exc()
try:
A.test()
except:
traceback.print_exc()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| cpburnz/python-decorated-templates | pdt/test/test_traceback.py | Python | mit | 665 |
encoders = []
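# NOTE: `encoders` is empty at import time, so the loop below is a no-op;
# it appears to be a template that concrete encoder modules populate.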
for encoder in encoders:
encoder["case"] = "mixedcase"
| ohio813/alpha3 | x86/utf_16/mixedcase/__init__.py | Python | bsd-3-clause | 74 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'removeTopic.ui'
#
# Created: Mon Jul 4 22:26:07 2011
# by: PyQt4 UI code generator 4.7.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_removeTopic(object):
def setupUi(self, removeTopic):
removeTopic.setObjectName("removeTopic")
removeTopic.resize(422, 150)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/feedIO.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
removeTopic.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(removeTopic)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.topicListCombo = QtGui.QComboBox(removeTopic)
self.topicListCombo.setMinimumSize(QtCore.QSize(291, 0))
self.topicListCombo.setObjectName("topicListCombo")
self.verticalLayout.addWidget(self.topicListCombo)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnRemove = QtGui.QPushButton(removeTopic)
self.btnRemove.setObjectName("btnRemove")
self.horizontalLayout.addWidget(self.btnRemove)
self.btnCancel = QtGui.QPushButton(removeTopic)
self.btnCancel.setWhatsThis("")
self.btnCancel.setObjectName("btnCancel")
self.horizontalLayout.addWidget(self.btnCancel)
self.verticalLayout.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(removeTopic)
QtCore.QMetaObject.connectSlotsByName(removeTopic)
def retranslateUi(self, removeTopic):
removeTopic.setWindowTitle(QtGui.QApplication.translate("removeTopic", "Remove Topic", None, QtGui.QApplication.UnicodeUTF8))
self.btnRemove.setText(QtGui.QApplication.translate("removeTopic", "Remove", None, QtGui.QApplication.UnicodeUTF8))
self.btnCancel.setText(QtGui.QApplication.translate("removeTopic", "&Cancel", None, QtGui.QApplication.UnicodeUTF8))
import feedIOicons_rc
| seejay/feedIO | feedio/UI/removeTopic_ui.py | Python | gpl-3.0 | 2,356 |
__author__ = 'stefanperndl'
| zenith0/foodroller | foodroller/tests/__init__.py | Python | apache-2.0 | 28 |
import unittest
import sys
sys.path.insert(0, '..')
from lib.item import Item, Elem
from lib.organizem import Organizem, Conf
from lib.orgm_controller_base import ActionArg
TEST_DATA_FILE = "orgm_test.dat"
TEST_BAK_FILE = "orgm_test_bak.dat"
IS_UNIT_TESTING = True
Organizem(TEST_DATA_FILE, IS_UNIT_TESTING).setconf(Conf.BAK_FILE, TEST_BAK_FILE)
class OrganizemTestCase(unittest.TestCase):
# Helpers
def _init_test_data_file(self):
with open(TEST_DATA_FILE, 'w') as f:
item = Item("TEST_ITEM")
f.write(str(item))
# Tests
def test_init_item(self):
title = "title"
item = Item(title)
self.assertTrue(item != None)
self.assertTrue(isinstance(item, Item))
self.assertTrue(item.title == title)
def test_init_organizem(self):
self._init_test_data_file()
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
self.assertTrue(orgm != None)
self.assertTrue(isinstance(orgm, Organizem))
self.assertTrue(orgm.data_file == TEST_DATA_FILE)
def test_add_item__find_item_by_title(self):
self._init_test_data_file()
title = "title"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, title))
def test_add_item__find_rgx_item_by_title(self):
self._init_test_data_file()
title = "title"
rgx_match = "titl*"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_area(self):
self._init_test_data_file()
title = "title"
area = "my area"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, area))
def test_add_item__find_rgx_item_by_area(self):
self._init_test_data_file()
title = "title"
area = "area"
rgx_match = "are*"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "my project"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, project))
def test_add_item__find_rgx_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "my project"
rgx_match = "my proj*"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_tags(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
        # element that is stored in item as a list (tags)
tag1 = 'tag 1'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
        # Test case of multi-value list passed to find_items() for an
        # element that is stored in item as a list (tags)
tag2 = 'tag 2'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
self.assertTrue(orgm.find_items(Elem.TAGS, tags2))
def test_add_item__find_rgx_items_by_tags(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
        # element that is stored in item as a list (tags)
tag1 = 'tag 1001'
tag1_rgx = 'tag 100*'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1_rgx, use_regex_match=True))
        # Test case of multi-value list passed to find_items() for an
        # element that is stored in item as a list (tags)
tag2 = 'tag 1012'
tag2_rgx = 'tag 101*'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2_rgx, use_regex_match=True))
def test_add_item__find_items_by_actions(self):
self._init_test_data_file()
title = "title"
        action1 = 'action 100'
        actions1 = [action1]
        # TODO FIX ALL THESE Item() ctor calls
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action1))
action2 = 'action 200'
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))
def test_add_item__find_rgx_items_by_actions(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
        # element that is stored in item as a list (actions)
action1 = 'action 1010'
action1_rgx = 'action 101*'
actions1 = [action1]
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action1_rgx, use_regex_match=True))
        # Test case of multi-value list passed to find_items() for an
        # element that is stored in item as a list (actions)
action2 = 'action 1020'
action2_rgx = 'action 102*'
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2_rgx, use_regex_match=True))
def test_add_item__find_items_by_priority(self):
self._init_test_data_file()
title = "title"
priority = "P1"
item = Item(title, {Elem.PRIORITY : priority})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PRIORITY, priority))
def test_add_item__find_rgx_items_by_priority(self):
self._init_test_data_file()
title = "title"
priority = "P1"
rgx_match = "P*"
item = Item(title, {Elem.PRIORITY : priority})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PRIORITY, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note))
def test_add_item__find_rgx_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
        note_rgx = r"\* Support for reporting *"
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx, use_regex_match=True))
def test_remove_items_rgx_by_title(self):
self._init_test_data_file()
title = "title"
rgx_match = "titl*"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
# NOTE: Now remove the item and check that it's not there any more
orgm.remove_items(Elem.TITLE, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
def test_remove_items_rgx_by_area(self):
self._init_test_data_file()
title = "title"
area = "area"
rgx_match = "are*"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
orgm.remove_items(Elem.AREA, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
def test_remove_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "project"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, project))
orgm.remove_items(Elem.PROJECT, project)
self.assertFalse(orgm.find_items(Elem.PROJECT, project))
def test_remove_items_by_tags(self):
self._init_test_data_file()
title = "title"
tag1 = 'tag 1'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
orgm.remove_items(Elem.TAGS, tag1)
self.assertFalse(orgm.find_items(Elem.TAGS, tag1))
tag2 = 'tag 2'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
self.assertTrue(orgm.find_items(Elem.TAGS, tags2))
orgm.remove_items(Elem.TAGS, tags2)
self.assertFalse(orgm.find_items(Elem.TAGS, tags2))
def test_remove_items_rgx_by_actions(self):
self._init_test_data_file()
title = "title"
action1 = 'action 110'
rgx_match = "action 11*"
actions1 = [action1]
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action1))
orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.ACTIONS, action1))
action2 = 'action 101'
rgx_match = "action 10*"
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))
orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.ACTIONS, actions2))
def test_remove_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note))
orgm.remove_items(Elem.NOTE, note)
self.assertFalse(orgm.find_items(Elem.NOTE, note))
def test_remove_items_rgx_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
        note_rgx = r"\* Support for reporting *"
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx, use_regex_match=True))
orgm.remove_items(Elem.NOTE, note_rgx, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.NOTE, note_rgx))
def test_get_all_titles(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
item1 = Item(title1)
item2 = Item(title2)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
# Have to handle the fact that init of test dat file includes dummy item with "TEST_ITEM" title
self.assertTrue(orgm.get_elements(Elem.TITLE) == ['TEST_ITEM', 'title 1', 'title 2'])
def test_get_all_projects(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ["''", 'project 1', 'project 2']
actual = orgm.get_elements(Elem.PROJECT)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_areas(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ["''", 'area 1', 'area 2']
actual = orgm.get_elements(Elem.AREA)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
tags1 = ['tag 1', 'tag 2']
tags2 = ['tag 3', 'tag 4']
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ['tag 1', 'tag 2', 'tag 3', 'tag 4']
actual = orgm.get_elements(Elem.TAGS)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_actions(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
actions1 = ['action 1', 'action 2']
actions2 = ['action 3', 'action 4']
item1 = Item(title1, {Elem.ACTIONS : actions1})
item2 = Item(title2, {Elem.ACTIONS : actions2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ['action 1', 'action 2', 'action 3', 'action 4']
actual = orgm.get_elements(Elem.ACTIONS)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_grouped_items_project(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
item3 = Item(title3, {Elem.PROJECT : project1})
item4 = Item(title4, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': 'project 1'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': 'project 1'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': 'project 2'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': 'project 2'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.PROJECT)
actual1 = repr(actual[project1])
actual2 = repr(actual[project2])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
def test_get_grouped_items_area(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': 'area 1'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 3'}, {'area': 'area 1'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 2'}, {'area': 'area 2'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 4'}, {'area': 'area 2'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.AREA)
actual1 = repr(actual[area1])
actual2 = repr(actual[area2])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
def test_get_grouped_items_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected3 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected4 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []},{'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.TAGS)
actual1 = repr(actual[tag1])
actual2 = repr(actual[tag2])
actual3 = repr(actual[tag3])
actual4 = repr(actual[tag4])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
self.assertTrue(expected3 == actual3)
self.assertTrue(expected4 == actual4)
def test_regroup_data_file_project(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
item3 = Item(title3, {Elem.PROJECT : project1})
item4 = Item(title4, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.PROJECT)
new_data_file_str = orgm.regroup_data_file(Elem.PROJECT, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_area(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.AREA)
new_data_file_str = orgm.regroup_data_file(Elem.AREA, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_area_sort_desc(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.AREA)
new_data_file_str = orgm.regroup_data_file(Elem.AREA, ActionArg.DESCENDING, with_group_labels=False)
grouped_items_str = []
        group_keys = list(grouped_items.keys())
group_keys.reverse()
for group_key in group_keys:
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.TAGS)
new_data_file_str = orgm.regroup_data_file(Elem.TAGS, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_tags_sort_desc(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.TAGS)
new_data_file_str = orgm.regroup_data_file(Elem.TAGS, ActionArg.DESCENDING, with_group_labels=False)
grouped_items_str = []
        group_keys = list(grouped_items.keys())
group_keys.reverse()
for group_key in group_keys:
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_backup(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
bak_data_file = 'orgm_test.dat_bak'
orgm.backup(bak_data_file)
import filecmp
        self.assertTrue(filecmp.cmp(TEST_DATA_FILE, bak_data_file))
    # NOTE: This is a manual test with no assert(). The user must look at
    # TEST_DATA_FILE and confirm there is a new empty item.
def test_add_empty(self):
self._init_test_data_file()
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_empty()
#def test_add_item__find_item_by_title__cli(self):
# self._init_test_data_file()
# orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
# title = 'my item title'
# cmd = ['-- add', '--title', title]
# orgm.run_shell_cmd(cmd)
# self.assertTrue(orgm.find_items(Elem.TITLE, title))
if __name__ == '__main__':
unittest.main() | marksweiss/organize-m | test/organizem_test.py | Python | mit | 30,369 |
#coding=utf-8
from flask import Flask
app = Flask(__name__)
app.debug = True
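# NOTE: debug mode enables the interactive debugger and auto-reloader;
# it should be disabled in production deployments.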
@app.route('/', methods=['GET'])
def index():
return '<p>Hello world</p>'
if __name__ == "__main__":
app.run()
| Ju2ender/Python-E | a-byte-of-python/app.py | Python | mit | 200 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.libraries.functions import get_unique_id_and_date
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hcat_service_check():
import params
smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
service = "HCatalog"
Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hcat_service_check():
import params
unique = get_unique_id_and_date()
output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
test_cmd = format("fs -test -e {output_file}")
if params.security_enabled:
kinit_cmd = format(
"{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
else:
kinit_cmd = ""
File(format("{tmp_dir}/hcatSmoke.sh"),
content=StaticFile("hcatSmoke.sh"),
mode=0755
)
prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare {purge_tables}")
exec_path = params.execute_path
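  # During a rolling upgrade, extend the command lookup path with the hive
  # binaries of the target stack version so the upgraded tools are found.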
if params.version and params.stack_name:
upgrade_hive_bin = format("/usr/hdp/{version}/hive/bin")
exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
Execute(prepare_cmd,
tries=3,
user=params.smokeuser,
try_sleep=5,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
logoutput=True)
if params.security_enabled:
ExecuteHadoop(test_cmd,
user=params.hdfs_user,
logoutput=True,
conf_dir=params.hadoop_conf_dir,
security_enabled=params.security_enabled,
kinit_path_local=params.kinit_path_local,
keytab=params.hdfs_user_keytab,
principal=params.hdfs_principal_name,
bin_dir=params.execute_path)
else:
ExecuteHadoop(test_cmd,
user=params.hdfs_user,
logoutput=True,
conf_dir=params.hadoop_conf_dir,
security_enabled=params.security_enabled,
kinit_path_local=params.kinit_path_local,
keytab=params.hdfs_user_keytab,
bin_dir=params.execute_path
)
cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup {purge_tables}")
Execute(cleanup_cmd,
tries=3,
user=params.smokeuser,
try_sleep=5,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
logoutput=True)
| zouzhberk/ambaridemo | demo-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py | Python | apache-2.0 | 3,624 |
ELECTRONIC = 0
NON_ELECTRONIC = 1
ALIAS = 2
def as_choices_tuple():
return ((ELECTRONIC, 'Electronic'),
(NON_ELECTRONIC, 'Non electronic'),
(ALIAS, 'Alias'))
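# Illustrative sketch (not part of this module): how these constants are
# typically wired into a Django field; the field name below is a placeholder.
#     delivery_types = models.PositiveIntegerField(
#         choices=as_choices_tuple(), default=ELECTRONIC)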
| vegarang/devilry-django | devilry/apps/core/models/deliverytypes.py | Python | bsd-3-clause | 187 |
def revrot(s, size):
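    # Split s into chunks of ``size`` characters, dropping any trailing
    # partial chunk. If the sum of the cubes of a chunk's digits is even,
    # reverse the chunk; otherwise rotate it one position to the left.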
if size <= 0 or not s or size > len(s):
return ''
result = ''
for i in range(0, len(s), size):
chunk = s[i:i+size]
if len(chunk) != size:
break
if sum(int(digit)**3 for digit in chunk) % 2 == 0:
result += chunk[::-1]
else:
result += chunk[1:] + chunk[:1]
return result
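# Example: revrot("733049910872815764", 5) -> "330479108928157"
# (each 5-char chunk has an odd cube-sum, so it is rotated left; the
# trailing "764" is discarded)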
| VladKha/CodeWars | 6 kyu/Reverse or rotate?/solve.py | Python | gpl-3.0 | 384 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to S3 operator."""
import warnings
from typing import Dict, Iterable, List, Optional, Sequence, Union, cast
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
class GCSToS3Operator(BaseOperator):
"""
Synchronizes a Google Cloud Storage bucket with an S3 bucket.
:param bucket: The Google Cloud Storage bucket to find the objects. (templated)
:type bucket: str
:param prefix: Prefix string which filters objects whose name begin with
this prefix. (templated)
:type prefix: str
:param delimiter: The delimiter by which you want to filter the objects. (templated)
        For example, to list the CSV files in a directory in GCS you would use
        delimiter='.csv'.
:type delimiter: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: Google account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param dest_aws_conn_id: The destination S3 connection
:type dest_aws_conn_id: str
:param dest_s3_key: The base S3 key to be used to store the files. (templated)
:type dest_s3_key: str
:param dest_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type dest_verify: bool or str
:param replace: Whether or not to verify the existence of the files in the
destination bucket.
By default is set to False
If set to True, will upload all the files replacing the existing ones in
the destination bucket.
If set to False, will upload only the files that are in the origin but not
in the destination bucket.
:type replace: bool
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type google_impersonation_chain: Union[str, Sequence[str]]
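    :param dest_s3_extra_args: Extra arguments that may be passed to the download/upload operations.
    :type dest_s3_extra_args: dict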
:param s3_acl_policy: Optional The string to specify the canned ACL policy for the
object to be uploaded in S3
:type s3_acl_policy: str
"""
template_fields: Iterable[str] = (
'bucket',
'prefix',
'delimiter',
'dest_s3_key',
'google_impersonation_chain',
)
ui_color = '#f0eee4'
def __init__(
self,
*,
bucket: str,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
google_cloud_storage_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
dest_aws_conn_id: str = 'aws_default',
dest_s3_key: str,
dest_verify: Optional[Union[str, bool]] = None,
replace: bool = False,
google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
dest_s3_extra_args: Optional[Dict] = None,
s3_acl_policy: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=3,
)
gcp_conn_id = google_cloud_storage_conn_id
self.bucket = bucket
self.prefix = prefix
self.delimiter = delimiter
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.dest_aws_conn_id = dest_aws_conn_id
self.dest_s3_key = dest_s3_key
self.dest_verify = dest_verify
self.replace = replace
self.google_impersonation_chain = google_impersonation_chain
self.dest_s3_extra_args = dest_s3_extra_args or {}
self.s3_acl_policy = s3_acl_policy
def execute(self, context) -> List[str]:
        # list all files in a Google Cloud Storage bucket
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.google_impersonation_chain,
)
self.log.info(
'Getting list of the files. Bucket: %s; Delimiter: %s; Prefix: %s',
self.bucket,
self.delimiter,
self.prefix,
)
files = hook.list(bucket_name=self.bucket, prefix=self.prefix, delimiter=self.delimiter)
s3_hook = S3Hook(
aws_conn_id=self.dest_aws_conn_id, verify=self.dest_verify, extra_args=self.dest_s3_extra_args
)
if not self.replace:
# if we are not replacing -> list all files in the S3 bucket
# and only keep those files which are present in
# Google Cloud Storage and not in S3
bucket_name, prefix = S3Hook.parse_s3_url(self.dest_s3_key)
# look for the bucket and the prefix to avoid look into
# parent directories/keys
existing_files = s3_hook.list_keys(bucket_name, prefix=prefix)
# in case that no files exists, return an empty array to avoid errors
existing_files = existing_files if existing_files is not None else []
# remove the prefix for the existing files to allow the match
existing_files = [file.replace(prefix, '', 1) for file in existing_files]
files = list(set(files) - set(existing_files))
if files:
for file in files:
file_bytes = hook.download(object_name=file, bucket_name=self.bucket)
dest_key = self.dest_s3_key + file
self.log.info("Saving file to %s", dest_key)
s3_hook.load_bytes(
cast(bytes, file_bytes), key=dest_key, replace=self.replace, acl_policy=self.s3_acl_policy
)
self.log.info("All done, uploaded %d files to S3", len(files))
else:
self.log.info("In sync, no files needed to be uploaded to S3")
return files
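# Illustrative usage sketch (assumes an Airflow DAG context and existing
# "google_cloud_default" and "aws_default" connections; bucket names and
# keys below are placeholders):
#     gcs_to_s3 = GCSToS3Operator(
#         task_id="gcs_to_s3",
#         bucket="my-gcs-bucket",
#         prefix="data/",
#         delimiter=".csv",
#         dest_s3_key="s3://my-s3-bucket/data/",
#         replace=False,
#     )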
| apache/incubator-airflow | airflow/providers/amazon/aws/transfers/gcs_to_s3.py | Python | apache-2.0 | 8,283 |
from django.contrib import admin
from .models import Message
admin.site.register(Message)
| Pylvax/django | project/starter_app/admin.py | Python | mit | 92 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='response',
field=models.CharField(default=b'', max_length=4, null=True, blank=True),
),
]
| erickdom/restAndroid | transactions/migrations/0002_transaction_response.py | Python | apache-2.0 | 436 |
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import pytest
from twitter.common.zookeeper.serverset.endpoint import Endpoint, ServiceInstance, Status
def test_endpoint_constructor():
# Check that those do not throw
Endpoint('host', 8340)
Endpoint('host', 8340, '1.2.3.4')
Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
with pytest.raises(ValueError):
Endpoint('host', 8340, 'not an IP')
with pytest.raises(ValueError):
Endpoint('host', 8340, None, 'not an IPv6')
def test_endpoint_equality():
assert Endpoint('host', 8340) == Endpoint('host', 8340)
assert Endpoint('host', 8340, '1.2.3.4') == Endpoint('host', 8340, '1.2.3.4')
assert (Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
== Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff'))
assert (Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
== Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff'))
def test_endpoint_hash_equality():
assert Endpoint('host', 8340).__hash__() == Endpoint('host', 8340).__hash__()
assert Endpoint('host', 8340, '1.2.3.4').__hash__() == Endpoint('host', 8340, '1.2.3.4').__hash__()
assert (Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__()
== Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__())
assert (Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__()
== Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__())
def test_endpoint_inequality():
assert Endpoint('host', 8340) != Endpoint('xhost', 8340)
assert Endpoint('host', 8340) != Endpoint('host', 8341)
assert (Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
!= Endpoint('host', 8340, '5.6.7.8', '2001:db8:5678:ffff:ffff:ffff:ffff:ffff'))
assert (Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff')
!= Endpoint('host', 8340, None, '2001:db8:5678:ffff:ffff:ffff:ffff:ffff'))
def test_endpoint_hash_inequality():
assert Endpoint('host', 8340).__hash__() != Endpoint('xhost', 8341).__hash__()
assert Endpoint('host', 8340).__hash__() != Endpoint('host', 8341).__hash__()
assert (Endpoint('host', 8340, '1.2.3.4', '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__()
!= Endpoint('host', 8340, '5.6.7.8', '2001:db8:5678:ffff:ffff:ffff:ffff:ffff').__hash__())
assert (Endpoint('host', 8340, None, '2001:db8:1234:ffff:ffff:ffff:ffff:ffff').__hash__()
!= Endpoint('host', 8340, None, '2001:db8:5678:ffff:ffff:ffff:ffff:ffff').__hash__())
def test_endpoint_from_dict():
expected = {
Endpoint('smfd-akb-12-sr1', 31181): {'host': 'smfd-akb-12-sr1', 'port': 31181},
Endpoint('smfd-akb-12-sr1', 31181, '1.2.3.4'): {'host': 'smfd-akb-12-sr1', 'port': 31181, 'inet': '1.2.3.4'},
Endpoint('smfd-akb-12-sr1', 31181, '1.2.3.4', '2001:db8:5678:ffff:ffff:ffff:ffff:ffff'):
{'host': 'smfd-akb-12-sr1', 'port': 31181, 'inet': '1.2.3.4', 'inet6':
'2001:db8:5678:ffff:ffff:ffff:ffff:ffff'},
Endpoint('smfd-akb-12-sr1', 31181, None, '2001:db8:5678:ffff:ffff:ffff:ffff:ffff'):
{'host': 'smfd-akb-12-sr1', 'port': 31181, 'inet6': '2001:db8:5678:ffff:ffff:ffff:ffff:ffff'}
}
for (endpoint, dic) in expected.items():
assert Endpoint.to_dict(endpoint) == dic
assert Endpoint.from_dict(dic) == endpoint
def test_status_equality():
assert Status.from_string('DEAD') == Status.from_string('DEAD')
def test_status_hash_equality():
assert Status.from_string('DEAD').__hash__() == Status.from_string('DEAD').__hash__()
def test_status_inequality():
assert Status.from_string('DEAD') != Status.from_string('STARTING')
def test_status_hash_inequality():
assert Status.from_string('DEAD').__hash__() != Status.from_string('STARTING').__hash__()
def _service_instance(vals):
json = '''{
"additionalEndpoints": {
"aurora": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
},
"health": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
}
},
"serviceEndpoint": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
},
"shard": %d,
"status": "ALIVE"
}''' % vals
return ServiceInstance.unpack(json)
def test_service_instance_equality():
vals = (1, 2, 3, 4)
assert _service_instance(vals) == _service_instance(vals)
def test_service_instance_hash_equality():
vals = (1, 2, 3, 4)
assert _service_instance(vals).__hash__() == _service_instance(vals).__hash__()
def test_service_instance_inequality():
vals = (1, 2, 3, 4)
vals2 = (5, 6, 7, 8)
assert _service_instance(vals) != _service_instance(vals2)
def test_service_instance_hash_inequality():
vals = (1, 2, 3, 4)
vals2 = (5, 6, 7, 8)
assert _service_instance(vals).__hash__() != _service_instance(vals2).__hash__()
def test_service_instance_to_json():
json = """{
"additionalEndpoints": {
"aurora": {
"host": "hostname",
"inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
"port": 22
},
"health": {
"host": "hostname",
"inet": "1.2.3.4",
"port": 23
},
"http": {
"host": "hostname",
"inet": "1.2.3.4",
"inet6": "2001:db8:1234:ffff:ffff:ffff:ffff:ffff",
"port": 23
}
},
"serviceEndpoint": {
"host": "hostname",
"port": 24
},
"shard": 1,
"status": "ALIVE"
}"""
service_instance = ServiceInstance(
Endpoint("hostname", 24),
{"aurora": Endpoint("hostname", 22, "1.2.3.4"),
"health": Endpoint("hostname", 23, None, "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
"http": Endpoint("hostname", 23, "1.2.3.4", "2001:db8:1234:ffff:ffff:ffff:ffff:ffff"),
},
'ALIVE',
1
)
assert ServiceInstance.unpack(json) == service_instance
assert ServiceInstance.unpack(ServiceInstance.pack(service_instance)) == service_instance
| ericzundel/commons | tests/python/twitter/common/zookeeper/serverset/test_endpoint.py | Python | apache-2.0 | 7,118 |