repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
wrouesnel/ansible | lib/ansible/plugins/callback/full_skip.py | 25 | 2290 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppresses tasks if all hosts skipped
description:
- Use this plugin when you don't care about any output for tasks that were completely skipped
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'full_skip'
def v2_runner_on_skipped(self, result):
self.outlines = []
def v2_playbook_item_on_skipped(self, result):
self.outlines = []
def v2_runner_item_on_skipped(self, result):
self.outlines = []
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display()
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_playbook_on_task_start(self, task, is_conditional):
self.outlines = []
self.outlines.append("TASK [%s]" % task.get_name().strip())
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self.outlines.append("task path: %s" % path)
def v2_playbook_item_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_playbook_item_on_ok(result)
def v2_runner_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_runner_on_ok(result)
def display(self):
if len(self.outlines) == 0:
return
(first, rest) = self.outlines[0], self.outlines[1:]
self._display.banner(first)
for line in rest:
self._display.display(line)
self.outlines = []
| gpl-3.0 | 5,615,647,055,135,321,000 | 30.805556 | 97 | 0.647162 | false |
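The callback above works by buffering each task's banner and flushing it only when a non-skipped event arrives. The short sketch below isolates that buffer-and-flush idea with no Ansible dependency; every name in it is illustrative, and the enabling step mentioned under requirements is noted as a comment.
# To use the real plugin, ansible.cfg would typically contain, under [defaults]:
#     stdout_callback = full_skip
# The class below is NOT the Ansible callback API; it only sketches the pattern.
class SkippedTaskBuffer(object):
    def __init__(self):
        self.outlines = []
    def start_task(self, name):
        # Reset the buffer for every new task, like v2_playbook_on_task_start above.
        self.outlines = ["TASK [%s]" % name]
    def on_skipped(self):
        # Drop everything buffered so far when all hosts skipped the task.
        self.outlines = []
    def on_result(self):
        # Flush only when something actually happened.
        for line in self.outlines:
            print(line)
        self.outlines = []

if __name__ == "__main__":
    buf = SkippedTaskBuffer()
    buf.start_task("install packages")
    buf.on_skipped()        # nothing printed
    buf.start_task("restart service")
    buf.on_result()         # prints "TASK [restart service]"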
Sergiojimenez/criticas_del_doctor_Mabuse | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# No exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
| mit | -6,909,142,546,696,017,000 | 25.729927 | 77 | 0.623157 | false |
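CheckChangeOnCommit above assembles a copyright-year regex from the current year. The standalone sketch below uses plain `re` and a hard-coded example year rather than the presubmit API, and range() instead of the original's Python 2 xrange(), to show the resulting pattern and that it accepts the standard Google header.
import re

current_year = 2012  # stand-in for int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(range(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
license_re = (
    r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
    r'.*? Use of this source code is governed by a BSD-style license that '
    r'can be\n'
    r'.*? found in the LICENSE file\.\n'
) % {'year': years_re}

sample = ('# Copyright (c) 2012 Google Inc. All rights reserved.\n'
          '# Use of this source code is governed by a BSD-style license that can be\n'
          '# found in the LICENSE file.\n')
print(years_re)                                    # (2012|2011|2010|2009)
print(re.match(license_re, sample) is not None)    # True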
jithinbp/pslab-desktop-apps | psl_res/GUI/Z_SCHOOL_LEVEL/A_voltage_fundamentals/K_LDR.py | 2 | 3951 | #!/usr/bin/python
"""
::
This experiment is used to study the behaviour of a light dependent resistor (LDR), including the 50 Hz flicker of fluorescent lighting.
"""
from __future__ import print_function
from PSL_Apps.utilitiesClass import utilitiesClass
from PSL_Apps.templates import ui_template_graph_nofft as template_graph_nofft
import numpy as np
from PyQt4 import QtGui,QtCore
import pyqtgraph as pg
import sys,functools,time
params = {
'image' : 'ldr.png',
'name':"Light Dependent\nResistor",
'hint':'''
Observe the workings of a light dependent resistor.<br>
Use it to study the 50 Hz fluctuation of fluorescent lamps.
'''
}
class AppWindow(QtGui.QMainWindow, template_graph_nofft.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
from PSL.analyticsClass import analyticsClass
self.math = analyticsClass()
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot=self.add2DPlot(self.plot_area,enableMenu=False)
self.enableCrossHairs(self.plot)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot.setLabel('left','Resistance', units=u"\u03A9",**labelStyle)
self.plot.setLabel('bottom','Time', units='S',**labelStyle)
self.tg=30.
self.max_samples=1000
self.samples = self.max_samples
self.plot.setLimits(yMax=50e3,yMin=0,xMin=0,xMax=1e-6*self.tg*self.samples)
self.plot.setYRange(1e3,30e3)
self.timer = self.newTimer()
self.legend = self.plot.addLegend(offset=(-10,30))
self.curve1 = self.addCurve(self.plot,'RESISTANCE (SEN)')
self.WidgetLayout.setAlignment(QtCore.Qt.AlignLeft)
#Control widgets
self.sqr = self.dialIcon(TITLE='SQR1',MIN=10,MAX=300,FUNC=self.I.sqr1,UNITS='Hz',TOOLTIP='Frequency of square wave generator #1\n0 for switched off, Max for On state')
self.WidgetLayout.addWidget(self.sqr)
self.voltmeter = self.displayIcon(TITLE = 'Average Resistance',UNITS=u"\u03A9",TOOLTIP='')
self.WidgetLayout.addWidget(self.voltmeter)
self.addPauseButton(self.bottomLayout,self.pause)
self.running=True
self.paused=False
self.timer.singleShot(100,self.run)
def pause(self,v):
self.paused = v
def run(self):
if not self.running: return
if self.paused:
self.timer.singleShot(100,self.run)
return
try:
self.I.capture_traces(1,self.samples,self.tg,'SEN',trigger=False)
if self.running:self.timer.singleShot(self.samples*self.I.timebase*1e-3+10,self.plotData)
except:
pass
def plotData(self):
if not self.running: return
try:
n=0
while(not self.I.oscilloscope_progress()[0]):
time.sleep(0.1)
n+=1
if n>10:
self.timer.singleShot(100,self.run)
return
self.I.__fetch_channel__(1)
V = np.clip(self.I.achans[0].get_yaxis(),0,3.2)
I = (3.3-V)/5.1e3
R = V/I
self.curve1.setData(self.I.achans[0].get_xaxis()*1e-6,R,connect='finite')
self.voltmeter.setValue(self.math.RMS(R))
self.displayCrossHairData(self.plot,False,self.samples,self.I.timebase,[V],[(0,255,0)])
if self.running:self.timer.singleShot(100,self.run)
except Exception as e:
print (e)
def crossHairEvent(self,plot,evt):
pos = evt[0].scenePos() ## using signal proxy turns original arguments into a tuple
if plot.sceneBoundingRect().contains(pos):
plot.mousePoint = plot.getPlotItem().vb.mapSceneToView(pos)
plot.vLine.setPos(plot.mousePoint.x())
plot.hLine.setPos(plot.mousePoint.y())
self.displayCrossHairData(plot,False,self.samples,self.I.timebase,[self.I.achans[0].get_yaxis()],[(0,255,0)])
def saveData(self):
self.saveDataWindow([self.curve1],self.plot)
def closeEvent(self, event):
self.running=False
self.timer.stop()
self.finished=True
def __del__(self):
self.timer.stop()
print('bye')
if __name__ == "__main__":
from PSL import sciencelab
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=sciencelab.connect())
myapp.show()
sys.exit(app.exec_())
| gpl-3.0 | -426,849,058,224,252,200 | 28.266667 | 169 | 0.703872 | false |
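plotData above converts the captured voltage into an LDR resistance through a fixed series resistor. A worked numeric version of that arithmetic is sketched below with made-up voltages, assuming the same 3.3 V supply and 5.1 kohm resistor; no PSLab hardware is needed to run it.
import numpy as np

V = np.array([0.5, 1.0, 2.0, 3.0])   # example sensor voltages, in volts
I = (3.3 - V) / 5.1e3                # current through the 5.1 kohm series resistor, in amperes
R = V / I                            # LDR resistance in ohms
for v, r in zip(V, R):
    print("V = %.1f V  ->  R = %.0f ohm" % (v, r))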
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/Crypto/PublicKey/pubkey.py | 125 | 8221 | #
# pubkey.py : Internal functions for public key operations
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling, Paul Swartz, and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#
__revision__ = "$Id$"
import types, warnings
from Crypto.Util.number import *
# Basic public key class
class pubkey:
"""An abstract class for a public key object.
:undocumented: __getstate__, __setstate__, __eq__, __ne__, validate
"""
def __init__(self):
pass
def __getstate__(self):
"""To keep key objects platform-independent, the key data is
converted to standard Python long integers before being
written out. It will then be reconverted as necessary on
restoration."""
d=self.__dict__
for key in self.keydata:
if d.has_key(key): d[key]=long(d[key])
return d
def __setstate__(self, d):
"""On unpickling a key object, the key data is converted to the big
number representation being used, whether that is Python long
integers, MPZ objects, or whatever."""
for key in self.keydata:
if d.has_key(key): self.__dict__[key]=bignum(d[key])
def encrypt(self, plaintext, K):
"""Encrypt a piece of data.
:Parameter plaintext: The piece of data to encrypt.
:Type plaintext: byte string or long
:Parameter K: A random parameter required by some algorithms
:Type K: byte string or long
:Return: A tuple with two items. Each item is of the same type as the
plaintext (string or long).
"""
wasString=0
if isinstance(plaintext, types.StringType):
plaintext=bytes_to_long(plaintext) ; wasString=1
if isinstance(K, types.StringType):
K=bytes_to_long(K)
ciphertext=self._encrypt(plaintext, K)
if wasString: return tuple(map(long_to_bytes, ciphertext))
else: return ciphertext
def decrypt(self, ciphertext):
"""Decrypt a piece of data.
:Parameter ciphertext: The piece of data to decrypt.
:Type ciphertext: byte string, long or a 2-item tuple as returned by `encrypt`
:Return: A byte string if ciphertext was a byte string or a tuple
of byte strings. A long otherwise.
"""
wasString=0
if not isinstance(ciphertext, types.TupleType):
ciphertext=(ciphertext,)
if isinstance(ciphertext[0], types.StringType):
ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
plaintext=self._decrypt(ciphertext)
if wasString: return long_to_bytes(plaintext)
else: return plaintext
def sign(self, M, K):
"""Sign a piece of data.
:Parameter M: The piece of data to encrypt.
:Type M: byte string or long
:Parameter K: A random parameter required by some algorithms
:Type K: byte string or long
:Return: A tuple with two items.
"""
if (not self.has_private()):
raise TypeError('Private key not available in this object')
if isinstance(M, types.StringType): M=bytes_to_long(M)
if isinstance(K, types.StringType): K=bytes_to_long(K)
return self._sign(M, K)
def verify (self, M, signature):
"""Verify the validity of a signature.
:Parameter M: The expected message.
:Type M: byte string or long
:Parameter signature: The signature to verify.
:Type signature: tuple with two items, as return by `sign`
:Return: True if the signature is correct, False otherwise.
"""
if isinstance(M, types.StringType): M=bytes_to_long(M)
return self._verify(M, signature)
# alias to compensate for the old validate() name
def validate (self, M, signature):
warnings.warn("validate() method name is obsolete; use verify()",
DeprecationWarning)
def blind(self, M, B):
"""Blind a message to prevent certain side-channel attacks.
:Parameter M: The message to blind.
:Type M: byte string or long
:Parameter B: Blinding factor.
:Type B: byte string or long
:Return: A byte string if M was so. A long otherwise.
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
blindedmessage=self._blind(M, B)
if wasString: return long_to_bytes(blindedmessage)
else: return blindedmessage
def unblind(self, M, B):
"""Unblind a message after cryptographic processing.
:Parameter M: The encoded message to unblind.
:Type M: byte string or long
:Parameter B: Blinding factor.
:Type B: byte string or long
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
unblindedmessage=self._unblind(M, B)
if wasString: return long_to_bytes(unblindedmessage)
else: return unblindedmessage
# The following methods will usually be left alone, except for
# signature-only algorithms. They both return Boolean values
# recording whether this key's algorithm can sign and encrypt.
def can_sign (self):
"""Tell if the algorithm can deal with cryptographic signatures.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
the private information required to generate a signature.
:Return: boolean
"""
return 1
def can_encrypt (self):
"""Tell if the algorithm can deal with data encryption.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
the private information required to decrypt data.
:Return: boolean
"""
return 1
def can_blind (self):
"""Tell if the algorithm can deal with data blinding.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
the private information required to carry out blinding.
:Return: boolean
"""
return 0
# The following methods will certainly be overridden by
# subclasses.
def size (self):
"""Tell the maximum number of bits that can be handled by this key.
:Return: int
"""
return 0
def has_private (self):
"""Tell if the key object contains private components.
:Return: bool
"""
return 0
def publickey (self):
"""Construct a new key carrying only the public information.
:Return: A new `pubkey` object.
"""
return self
def __eq__ (self, other):
"""__eq__(other): 0, 1
Compare us to other for equality.
"""
return self.__getstate__() == other.__getstate__()
def __ne__ (self, other):
"""__ne__(other): 0, 1
Compare us to other for inequality.
"""
return not self.__eq__(other)
| gpl-2.0 | -7,466,906,361,580,820,000 | 33.254167 | 86 | 0.619389 | false |
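The `pubkey` class above is a template: the public encrypt/decrypt methods normalise strings to integers and delegate the real work to `_encrypt`/`_decrypt` supplied by subclasses. The toy below mirrors only that control flow; it is deliberately not cryptography, does not import pycrypto, and every name in it is made up.
class ToyKey(object):
    def __init__(self, secret):
        self.secret = secret
    def _encrypt(self, m, k):
        # k is the random parameter some algorithms need; this toy ignores it.
        # Placeholder arithmetic only - NOT cryptography.
        return (m + self.secret,)
    def _decrypt(self, c):
        return c[0] - self.secret
    def encrypt(self, plaintext, k):
        # Convert byte strings to integers, as the real class does with bytes_to_long.
        m = int.from_bytes(plaintext, 'big') if isinstance(plaintext, bytes) else plaintext
        return self._encrypt(m, k)
    def decrypt(self, ciphertext):
        return self._decrypt(ciphertext)

key = ToyKey(secret=12345)
ct = key.encrypt(b'hi', 0)
print(key.decrypt(ct) == int.from_bytes(b'hi', 'big'))  # True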
eMerzh/Diamond-1 | src/diamond/handler/g_metric.py | 52 | 2760 | # coding=utf-8
"""
Emulate a gmetric client for usage with
[Ganglia Monitoring System](http://ganglia.sourceforge.net/)
"""
from Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
"""
Implements the abstract Handler class, sending data the same way that
gmetric does.
"""
def __init__(self, config=None):
"""
Create a new instance of the GmetricHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if gmetric is None:
logging.error("Failed to load gmetric module")
return
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.port = int(self.config['port'])
self.protocol = self.config['protocol']
if not self.protocol:
self.protocol = 'udp'
# Initialize
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GmetricHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'protocol': 'udp or tcp',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GmetricHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8651,
'protocol': 'udp',
})
return config
def __del__(self):
"""
Destroy instance of the GmetricHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to a gmond instance
"""
# Just send the data as a string
self._send(metric)
def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = "60"
dmax = "0"
slope = "both"
# FIXME: Badness, shouldn't *assume* double type
metric_type = "double"
units = ""
group = ""
self.gmetric.send(metric_name,
metric.value,
metric_type,
units,
slope,
tmax,
dmax,
group)
def _close(self):
"""
Close the connection
"""
self.gmetric = None
| mit | -7,917,457,484,803,976,000 | 23.642857 | 76 | 0.514493 | false |
groschovskiy/lerigos_music | Server/API/lib/bson/min_key.py | 55 | 1324 | # Copyright 2010-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation for the MongoDB internal MinKey type.
"""
class MinKey(object):
"""MongoDB internal MinKey type.
.. versionchanged:: 2.7
``MinKey`` now implements comparison operators.
"""
_type_marker = 255
def __eq__(self, other):
return isinstance(other, MinKey)
def __hash__(self):
return hash(self._type_marker)
def __ne__(self, other):
return not self == other
def __le__(self, dummy):
return True
def __lt__(self, other):
return not isinstance(other, MinKey)
def __ge__(self, other):
return isinstance(other, MinKey)
def __gt__(self, dummy):
return False
def __repr__(self):
return "MinKey()"
| apache-2.0 | 2,853,858,879,797,168,000 | 25.48 | 74 | 0.651057 | false |
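A quick demonstration of the ordering contract documented above, assuming the bson package that ships with pymongo is installed (the file above is a vendored copy of it):
from bson.min_key import MinKey

print(MinKey() < 0)          # True: MinKey sorts before everything else
print(MinKey() < MinKey())   # False: not strictly less than another MinKey
print(MinKey() <= MinKey())  # True
print(MinKey() == MinKey())  # True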
sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/AcceptanceTests/file_tests.py | 2 | 5671 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import io
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir, pardir))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyFile"))
from msrest.exceptions import DeserializationError
from autorestswaggerbatfileservice import AutoRestSwaggerBATFileService
from autorestswaggerbatfileservice.models import ErrorException
class FileTests(unittest.TestCase):
def test_files(self):
client = AutoRestSwaggerBATFileService(base_url="http://localhost:3000")
client.config.connection.data_block_size = 1000
def test_callback(data, response, progress=[0]):
self.assertTrue(len(data) > 0)
self.assertIsNotNone(response)
self.assertFalse(response._content_consumed)
total = float(response.headers['Content-Length'])
if total < 4096:
progress[0] += len(data)
print("Downloading... {}%".format(int(progress[0]*100/total)))
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
client.config.connection.data_block_size = 4096
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_empty_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
def add_headers(adapter, request, response, *args, **kwargs):
response.headers['Content-Length'] = str(3000 * 1024 * 1024)
file_length = 0
client._client.add_hook('response', add_headers)
stream = client.files.get_file_large(callback=test_callback)
#for data in stream:
# file_length += len(data)
#self.assertEqual(file_length, 3000 * 1024 * 1024)
def test_files_raw(self):
def test_callback(data, response, progress=[0]):
self.assertTrue(len(data) > 0)
self.assertIsNotNone(response)
self.assertFalse(response._content_consumed)
total = float(response.headers.get('Content-Length', 0))
if total:
progress[0] += len(data)
print("Downloading... {}%".format(int(progress[0]*100/total)))
client = AutoRestSwaggerBATFileService(base_url="http://localhost:3000")
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_empty_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
if __name__ == '__main__':
unittest.main() | mit | -714,810,147,437,002,200 | 36.8 | 84 | 0.621979 | false |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/src/class/oc_edit.py | 21 | 3384 | # pylint: skip-file
# flake8: noqa
class Edit(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
self.separator = separator
def get(self):
'''return a secret by name '''
return self._get(self.kind, self.name)
def update(self, file_name, content, edits, force=False, content_type='yaml'):
'''run update '''
if file_name:
if content_type == 'yaml':
data = yaml.load(open(file_name))
elif content_type == 'json':
data = json.loads(open(file_name).read())
yed = Yedit(filename=file_name, content=data, separator=self.separator)
# Keep this for compatibility
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if not results['changed']:
return results
yed.write()
atexit.register(Utils.cleanup, [file_name])
return self._replace(file_name, force=force)
return self._replace_content(self.kind, self.name, content, edits, force=force, sep=self.separator)
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocedit = Edit(params['kind'],
params['namespace'],
params['name'],
kubeconfig=params['kubeconfig'],
separator=params['separator'],
verbose=params['debug'])
api_rval = ocedit.get()
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
return {"failed": True, 'msg': api_rval}
########
# Update
########
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
api_rval = ocedit.update(params['file_name'],
params['content'],
params['edits'],
params['force'],
params['file_format'])
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
if 'updated' in api_rval and not api_rval['updated']:
return {"changed": False, 'results': api_rval, 'state': 'present'}
# return the created object
api_rval = ocedit.get()
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
return {"changed": True, 'results': api_rval, 'state': 'present'}
| apache-2.0 | -8,052,438,944,740,666,000 | 32.50495 | 107 | 0.502955 | false |
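run_ansible above expects a flat params dict; the example below lists exactly the keys the method reads, with placeholder values, and leaves the actual call commented out because it needs a reachable OpenShift cluster and the rest of the lib_openshift module.
# Illustrative input for Edit.run_ansible; every value here is an example only.
params = {
    'kind': 'dc',
    'namespace': 'default',
    'name': 'router',
    'kubeconfig': '/etc/origin/master/admin.kubeconfig',
    'separator': '.',
    'debug': False,
    'file_name': None,
    'content': {'spec.replicas': 2},
    'edits': None,
    'force': False,
    'file_format': 'yaml',
}
# result = Edit.run_ansible(params, check_mode=False)
print(sorted(params))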
FederatedAI/FATE | python/fate_test/fate_test/flow_test/flow_process.py | 1 | 17149 | import json
import os
import tarfile
import time
from contextlib import closing
from datetime import datetime
import requests
def get_dict_from_file(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
json_info = json.load(f)
return json_info
class Base(object):
def __init__(self, server_url, component_name):
self.config = None
self.dsl = None
self.guest_party_id = None
self.host_party_id = None
self.job_id = None
self.model_id = None
self.model_version = None
self.server_url = server_url
self.component_name = component_name
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, work_mode):
self.config = get_dict_from_file(path)
self.config["initiator"]["party_id"] = guest_party_id[0]
self.config["role"]["guest"] = guest_party_id
self.config["role"]["host"] = host_party_id
if self.config["job_parameters"].get("common"):
self.config["job_parameters"]["common"]["work_mode"] = work_mode
else:
self.config["job_parameters"]["work_mode"] = work_mode
if "arbiter" in self.config["role"]:
self.config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
return self.config
def set_dsl(self, path):
self.dsl = get_dict_from_file(path)
return self.dsl
def submit(self):
post_data = {'job_runtime_conf': self.config, 'job_dsl': self.dsl}
print(f"start submit job, data:{post_data}")
response = requests.post("/".join([self.server_url, "job", "submit"]), json=post_data)
if response.status_code == 200 and not response.json().get('retcode'):
self.job_id = response.json().get("jobId")
print(f"submit job success: {response.json()}")
self.model_id = response.json().get("data").get("model_info").get("model_id")
self.model_version = response.json().get("data").get("model_info").get("model_version")
return True
else:
print(f"submit job failed: {response.text}")
return False
def query_job(self):
post_data = {'job_id': self.job_id}
response = requests.post("/".join([self.server_url, "job", "query"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
return response.json().get("data")[0].get("f_status")
return False
def wait_success(self, timeout=60 * 10):
for i in range(timeout // 10):
time.sleep(10)
status = self.query_job()
print("job {} status is {}".format(self.job_id, status))
if status and status == "success":
return True
if status and status in ["canceled", "timeout", "failed"]:
return False
return False
def get_component_output_data(self, output_path=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
if not output_path:
output_path = './output/data'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(post_data['job_id'], post_data['component_name'],
post_data['role'], post_data['party_id'])
extract_dir = os.path.join(output_path, tar_file_name.replace('.tar.gz', ''))
print("start get component output dat")
with closing(
requests.get("/".join([self.server_url, "tracking", "component/output/data/download"]), json=post_data,
stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
print(f'get component output path {extract_dir}')
except:
print(f"get component output data failed")
return False
def get_output_data_table(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
response = requests.post("/".join([self.server_url, "tracking", "component/output/data/table"]), json=post_data)
result = {}
try:
if response.status_code == 200:
result["name"] = response.json().get("data")[0].get("table_name")
result["namespace"] = response.json().get("data")[0].get("namespace")
except Exception as e:
raise RuntimeError(f"output data table error: {response}") from e
return result
def get_table_info(self, table_name):
post_data = {
"name": table_name['name'],
"namespace": table_name['namespace']
}
response = requests.post("/".join([self.server_url, "table", "table_info"]), json=post_data)
try:
if response.status_code == 200:
table_count = response.json().get("data").get("count")
else:
raise RuntimeError(f"get table info failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return table_count
def get_auc(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
try:
if response.status_code == 200:
auc = response.json().get("data").get("train").get(self.component_name).get("data")[0][1]
else:
raise RuntimeError(f"get metrics failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return auc
class TrainLRModel(Base):
def get_component_metrics(self, metric_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = metric_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component metrics success, path is:{os.path.abspath(file)}")
else:
print(f"get component metrics:{response.json()}")
return False
def get_component_output_model(self, model_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
print(f"request component output model: {post_data}")
response = requests.post("/".join([self.server_url, "tracking", "component/output/model"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = model_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component output model success, path is:{os.path.abspath(file)}")
else:
print(f"get component output model:{response.json()}")
return False
class PredictLRMode(Base):
def set_predict(self, guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, path, work_mode):
self.set_config(guest_party_id, host_party_id, arbiter_party_id, path, work_mode)
if self.config["job_parameters"].get("common"):
self.config["job_parameters"]["common"]["model_id"] = model_id
self.config["job_parameters"]["common"]["model_version"] = model_version
else:
self.config["job_parameters"]["model_id"] = model_id
self.config["job_parameters"]["model_version"] = model_version
def download_from_request(http_response, tar_file_name, extract_dir):
with open(tar_file_name, 'wb') as fw:
for chunk in http_response.iter_content(1024):
if chunk:
fw.write(chunk)
tar = tarfile.open(tar_file_name, "r:gz")
file_names = tar.getnames()
for file_name in file_names:
tar.extract(file_name, extract_dir)
tar.close()
os.remove(tar_file_name)
def train_job(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, train_dsl_path, server_url, work_mode,
component_name, metric_output_path, model_output_path, constant_auc):
train = TrainLRModel(server_url, component_name)
train.set_config(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, work_mode)
train.set_dsl(train_dsl_path)
status = train.submit()
if status:
is_success = train.wait_success(timeout=600)
if is_success:
train.get_component_metrics(metric_output_path)
train.get_component_output_model(model_output_path)
train.get_component_output_data()
train_auc = train.get_auc()
assert abs(constant_auc - train_auc) <= 1e-4, 'The training result is wrong, auc: {}'.format(train_auc)
train_data_count = train.get_table_info(train.get_output_data_table())
return train, train_data_count
return False
def predict_job(guest_party_id, host_party_id, arbiter_party_id, predict_conf_path, predict_dsl_path, model_id,
model_version, server_url, work_mode, component_name):
predict = PredictLRMode(server_url, component_name)
predict.set_predict(guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, predict_conf_path,
work_mode)
predict.set_dsl(predict_dsl_path)
status = predict.submit()
if status:
is_success = predict.wait_success(timeout=600)
if is_success:
predict.get_component_output_data()
predict_data_count = predict.get_table_info(predict.get_output_data_table())
return predict, predict_data_count
return False
class UtilizeModel:
def __init__(self, model_id, model_version, server_url):
self.model_id = model_id
self.model_version = model_version
self.deployed_model_version = None
self.service_id = None
self.server_url = server_url
def deploy_model(self):
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
response = requests.post("/".join([self.server_url, "model", "deploy"]), json=post_data)
print(f'Request data of deploy model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of model deploy request: {json.dumps(resp_data, indent=4)}')
if resp_data.get("retcode", 100) == 0:
self.deployed_model_version = resp_data.get("data", {}).get("model_version")
else:
raise Exception(f"Model {self.model_id} {self.model_version} deploy failed, "
f"details: {resp_data.get('retmsg')}")
else:
raise Exception(f"Request model deploy api failed, status code: {response.status_code}")
def load_model(self):
post_data = {
"job_id": self.deployed_model_version
}
response = requests.post("/".join([self.server_url, "model", "load"]), json=post_data)
print(f'Request data of load model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of load model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
return True
raise Exception(f"Load model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model load api failed, status code: {response.status_code}")
def bind_model(self):
post_data = {
"job_id": self.deployed_model_version,
"service_id": f"auto_test_{datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')}"
}
response = requests.post("/".join([self.server_url, "model", "bind"]), json=post_data)
print(f'Request data of bind model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response data of bind model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
self.service_id = post_data.get('service_id')
return True
raise Exception(f"Bind model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model bind api failed, status code: {response.status_code}")
def online_predict(self, online_serving):
serving_url = f"http://{online_serving}/federation/1.0/inference"
post_data = {
"head": {
"serviceId": self.service_id
},
"body": {
"featureData": {
"phone_num": "18576635456",
},
"sendToRemoteFeatureData": {
"device_type": "imei",
"phone_num": "18576635456",
"encrypt_type": "raw"
}
}
}
headers = {"Content-Type": "application/json"}
response = requests.post(serving_url, json=post_data, headers=headers)
print(f"Request data of online predict request: {json.dumps(post_data, indent=4)}")
if response.status_code == 200:
print(f"Online predict successfully, response: {json.dumps(response.json(), indent=4)}")
else:
print(f"Online predict successfully, details: {response.text}")
def run_fate_flow_test(config_json):
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
train_conf_path = config_json['train_conf_path']
train_dsl_path = config_json['train_dsl_path']
server_url = config_json['server_url']
online_serving = config_json['online_serving']
work_mode = config_json['work_mode']
constant_auc = config_json['train_auc']
component_name = config_json['component_name']
metric_output_path = config_json['metric_output_path']
model_output_path = config_json['model_output_path']
print('submit train job')
# train
train, train_count = train_job(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, train_dsl_path,
server_url, work_mode, component_name, metric_output_path, model_output_path, constant_auc)
if not train:
print('train job run failed')
return False
print('train job success')
# deploy
print('start deploy model')
utilize = UtilizeModel(train.model_id, train.model_version, server_url)
utilize.deploy_model()
print('deploy model success')
# predict
predict_conf_path = config_json['predict_conf_path']
predict_dsl_path = config_json['predict_dsl_path']
model_id = train.model_id
model_version = utilize.deployed_model_version
print('start submit predict job')
predict, predict_count = predict_job(guest_party_id, host_party_id, arbiter_party_id, predict_conf_path,
predict_dsl_path, model_id, model_version, server_url, work_mode, component_name)
if not predict:
print('predict job run failed')
return False
if train_count != predict_count:
print('Loss of forecast data')
return False
print('predict job success')
# load model
utilize.load_model()
# bind model
utilize.bind_model()
# online predict
utilize.online_predict(online_serving=online_serving)
| apache-2.0 | -2,851,475,279,479,799,000 | 42.525381 | 126 | 0.580267 | false |
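run_fate_flow_test drives the whole train/deploy/predict cycle from a single config dict; the sketch below enumerates the keys the function reads, with placeholder party ids, paths and URLs rather than a working deployment.
# Illustrative config for run_fate_flow_test; values are placeholders.
example_config = {
    "guest_party_id": [9999],
    "host_party_id": [10000],
    "arbiter_party_id": [10000],
    "train_conf_path": "test_hetero_lr_job_conf.json",
    "train_dsl_path": "test_hetero_lr_job_dsl.json",
    "predict_conf_path": "test_predict_conf.json",
    "predict_dsl_path": "test_predict_dsl.json",
    "server_url": "http://127.0.0.1:9380/v1",
    "online_serving": "127.0.0.1:8059",
    "work_mode": 0,
    "train_auc": 0.98,
    "component_name": "hetero_lr_0",
    "metric_output_path": "./output/metric_{}.json",
    "model_output_path": "./output/model_{}.json",
}
# run_fate_flow_test(example_config)   # requires a running FATE Flow server
print(sorted(example_config))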
Lambdanaut/crits | crits/campaigns/forms.py | 15 | 2173 | from django import forms
from django.forms.widgets import HiddenInput
from crits.campaigns.campaign import Campaign
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names
class AddCampaignForm(forms.Form):
"""
Django form for adding a new Campaign.
"""
error_css_class = 'error'
required_css_class = 'required'
campaign = forms.CharField(widget=forms.TextInput, required=True)
aliases = forms.CharField(widget=forms.TextInput, required=False)
description = forms.CharField(widget=forms.TextInput, required=False)
def __init__(self, *args, **kwargs):
super(AddCampaignForm, self).__init__(*args, **kwargs)
add_bucketlist_to_form(self)
add_ticket_to_form(self)
class TTPForm(forms.Form):
"""
Django form for adding/editing a Campaign TTP.
"""
error_css_class = 'error'
required_css_class = 'required'
ttp = forms.CharField(
widget=forms.Textarea(attrs={'cols': '35',
'rows': '5'}),
required=True)
class CampaignForm(forms.Form):
"""
Django form for attributing a Campaign to another object.
The list of names comes from :func:`get_item_names`.
Confidence can be one of "low", "medium", or "high".
"""
error_css_class = 'error'
required_css_class = 'required'
name = forms.ChoiceField(widget=forms.Select, required=True)
confidence = forms.ChoiceField(widget=forms.Select, required=True)
description = forms.CharField(widget=forms.Textarea(), required=False)
date = forms.CharField(widget=HiddenInput, required=False)
related = forms.BooleanField(
help_text="Apply to all first level related objects.",
initial=False,
required=False)
def __init__(self, *args, **kwargs):
super(CampaignForm, self).__init__(*args, **kwargs)
self.fields['confidence'].choices = [
('low', 'low'),
('medium', 'medium'),
('high', 'high'),
]
self.fields['name'].choices = [
(c.name, c.name) for c in get_item_names(Campaign, True)]
| mit | -8,334,712,032,930,798,000 | 33.492063 | 74 | 0.640129 | false |
monarch-initiative/monarch-app | tests/behave/steps/selenium-forms.py | 3 | 1359 | ####
#### Steps for operating on the various forms and their results.
####
from behave import *
###
### radio button click
###
@given('I click the "{id}" radio button')
def step_impl(context, id):
webelt = context.browser.find_element_by_id(id)
webelt.click()
###
### Submission.
###
## Submit analyze phenotype.
@when('I submit analyze phenotype')
def step_impl(context):
webelt = context.browser.find_element_by_id('analyze-submit')
webelt.click()
## Submit navbar search.
@given('I submit navbar search')
def step_impl(context):
#print(context.browser.title)
webelt = context.browser.find_element_by_id('search_form')
webelt.submit()
###
### Example for input for a possible text area form.
###
@given('I input "{text}" into the textarea "{eid}"')
def step_impl(context, text, eid):
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(text)
@given('I input the following text into the textarea "{eid}"')
def step_impl(context, eid):
input_box_text = context.text
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(input_box_text)
@when('I submit the form by clicking XPath "{xpath}"')
def step_impl(context, xpath):
## xpath like "/html/body/div[2]/div[4]/div/div/form/div[2]/button"
webelt = context.browser.find_element_by_xpath(xpath)
webelt.click()
| bsd-3-clause | -3,846,750,432,897,417,700 | 25.134615 | 71 | 0.676233 | false |
yousrabk/mne-python | mne/viz/tests/test_misc.py | 17 | 4858 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate)
from mne.datasets import testing
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate)
from mne.utils import requires_nibabel, run_tests_if_main, slow_test
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
return io.Raw(raw_fname, preload=True)
def _get_events():
return read_events(event_fname)
def test_plot_cov():
"""Test plotting of covariances
"""
raw = _get_raw()
cov = read_cov(cov_fname)
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours
"""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
def test_plot_events():
"""Test plotting events
"""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram
"""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
@slow_test
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate
"""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes
"""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
run_tests_if_main()
| bsd-3-clause | 8,473,609,485,165,179,000 | 34.985185 | 79 | 0.633388 | false |
350dotorg/Django | django/contrib/gis/db/models/fields.py | 400 | 11157 | from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if not connection.alias in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if not srid in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
# Geodetic units.
geodetic_units = ('Decimal Degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection) in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, basestring) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name):
super(GeometryField, self).contribute_to_class(cls, name)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class' : forms.GeometryField,
'null' : self.null,
'geom_type' : self.geom_type,
'srid' : self.srid,
}
defaults.update(kwargs)
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
description = _("Geometry collection")
| bsd-3-clause | -161,126,352,176,438,140 | 36.94898 | 117 | 0.626961 | false |
osgcc/ryzom | nel/tools/build_gamedata/processes/clodbank/3_install.py | 3 | 1753 | #!/usr/bin/python
#
# \file 3_install.py
# \brief Install clodbank
# \date 2009-03-10 13:13GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Install clodbank
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Install clodbank")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
printLog(log, ">>> Install clodbank <<<")
srcDir = ExportBuildDirectory + "/" + ClodBankBuildDirectory
mkPath(log, srcDir)
destDir = InstallDirectory + "/" + ShapeInstallDirectory
mkPath(log, destDir)
copyFilesNoTreeIfNeeded(log, srcDir, destDir)
printLog(log, "")
log.close()
# end of file
| agpl-3.0 | 1,143,301,065,029,545,300 | 29.754386 | 75 | 0.72105 | false |
royc1/gpdb | gpMgmt/bin/gppylib/mainUtils.py | 19 | 22742 | # Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
mainUtils.py
------------
This file provides a rudimentary framework to support top-level option
parsing, initialization and cleanup logic common to multiple programs.
It also implements workarounds to make other modules we use like
GpCoverage() work properly.
The primary interface function is 'simple_main'. For an example of
how it is expected to be used, see gprecoverseg.
It is anticipated that the functionality of this file will grow as we
extend common functions of our gp utilities. Please keep this in mind
and try to avoid placing logic for a specific utility here.
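A minimal usage sketch (illustrative only; the parser and command class names
below are hypothetical and not taken from any real gp utility):

    def create_option_parser():
        parser = OptionParser()
        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
        return parser

    def create_command(options, args):
        return MyCommand(options, args)   # must provide run() and cleanup()

    if __name__ == '__main__':
        simple_main(create_option_parser, create_command,
                    {'pidfilename': 'myutility.pid'})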
"""
import os, sys, signal, errno, yaml
gProgramName = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2, 5, 0):
sys.exit(
'''Error: %s is supported on Python versions 2.5 or greater
Please upgrade python installed on this machine.''' % gProgramName)
from gppylib import gplog
from gppylib.commands import gp, unix
from gppylib.commands.base import ExecutionError
from gppylib.system import configurationInterface, configurationImplGpdb, fileSystemInterface, \
fileSystemImplOs, osInterface, osImplNative, faultProberInterface, faultProberImplGpdb
from optparse import OptionGroup, OptionParser, SUPPRESS_HELP
from gppylib.gpcoverage import GpCoverage
from lockfile.pidlockfile import PIDLockFile, LockTimeout
def getProgramName():
"""
Return the name of the current top-level program from sys.argv[0]
or the programNameOverride option passed to simple_main via mainOptions.
"""
global gProgramName
return gProgramName
class SimpleMainLock:
"""
Tools like gprecoverseg prohibit running multiple instances at the same time
via a simple lock file created in the MASTER_DATA_DIRECTORY. This class takes
care of the work to manage this lock as appropriate based on the mainOptions
specified.
Note that in some cases, the utility may want to recursively invoke
itself (e.g. gprecoverseg -r). To handle this, the caller may specify
the name of an environment variable holding the pid already acquired by
the parent process.
"""
def __init__(self, mainOptions):
self.pidfilename = mainOptions.get('pidfilename', None) # the file we're using for locking
self.parentpidvar = mainOptions.get('parentpidvar', None) # environment variable holding parent pid
self.parentpid = None # parent pid which already has the lock
self.ppath = None # complete path to the lock file
self.pidlockfile = None # PIDLockFile object
self.pidfilepid = None # pid of the process which has the lock
self.locktorelease = None # PIDLockFile object we should release when done
if self.parentpidvar is not None and self.parentpidvar in os.environ:
self.parentpid = int(os.environ[self.parentpidvar])
if self.pidfilename is not None:
self.ppath = os.path.join(gp.get_masterdatadir(), self.pidfilename)
self.pidlockfile = PIDLockFile( self.ppath )
def acquire(self):
"""
Attempts to acquire the lock this process needs to proceed.
Returns None on successful acquisition of the lock or
the pid of the other process which already has the lock.
"""
        # nothing to do if the utility requires no locking
if self.pidlockfile is None:
return None
# look for a lock file
self.pidfilepid = self.pidlockfile.read_pid()
if self.pidfilepid is not None:
# we found a lock file
# allow the process to proceed if the locker was our parent
if self.pidfilepid == self.parentpid:
return None
# cleanup stale locks
try:
os.kill(self.pidfilepid, signal.SIG_DFL)
except OSError, exc:
if exc.errno == errno.ESRCH:
self.pidlockfile.break_lock()
self.pidfilepid = None
# try and acquire the lock
try:
self.pidlockfile.acquire(1)
except LockTimeout:
self.pidfilepid = self.pidlockfile.read_pid()
return self.pidfilepid
# we have the lock
# prepare for a later call to release() and take good
# care of the process environment for the sake of our children
self.locktorelease = self.pidlockfile
self.pidfilepid = self.pidlockfile.read_pid()
if self.parentpidvar is not None:
os.environ[self.parentpidvar] = str(self.pidfilepid)
return None
def release(self):
"""
Releases the lock this process acquired.
"""
if self.locktorelease is not None:
self.locktorelease.release()
self.locktorelease = None
#
# exceptions we handle specially by the simple_main framework.
#
class ProgramArgumentValidationException(Exception):
"""
Throw this out to main to have the message possibly
printed with a help suggestion.
"""
def __init__(self, msg, shouldPrintHelp=False):
"init"
Exception.__init__(self)
self.__shouldPrintHelp = shouldPrintHelp
self.__msg = msg
def shouldPrintHelp(self):
"shouldPrintHelp"
return self.__shouldPrintHelp
def getMessage(self):
"getMessage"
return self.__msg
class ExceptionNoStackTraceNeeded(Exception):
"""
Our code throws this exception when we encounter a condition
we know can arise which demands immediate termination.
"""
pass
class UserAbortedException(Exception):
"""
UserAbortedException should be thrown when a user decides to stop the
program (at a y/n prompt, for example).
"""
pass
def simple_main( createOptionParserFn, createCommandFn, mainOptions=None) :
"""
createOptionParserFn : a function that takes no arguments and returns an OptParser
    createCommandFn : a function that takes two arguments (the options, and the args that are not
                      processed into options) and returns an object that has "run" and "cleanup"
                      functions. Its "run" function must run and return an exit code. "cleanup" will
                      be called before the program exits; it can be used, for example, to clean up a
                      worker pool
mainOptions can include: forceQuietOutput (map to bool),
programNameOverride (map to string)
suppressStartupLogMessage (map to bool)
useHelperToolLogging (map to bool)
setNonuserOnToolLogger (map to bool, defaults to false)
pidfilename (string)
parentpidvar (string)
"""
coverage = GpCoverage()
coverage.start()
try:
simple_main_internal(createOptionParserFn, createCommandFn, mainOptions)
finally:
coverage.stop()
coverage.generate_report()
def simple_main_internal(createOptionParserFn, createCommandFn, mainOptions):
"""
If caller specifies 'pidfilename' in mainOptions then we manage the
specified pid file within the MASTER_DATA_DIRECTORY before proceeding
to execute the specified program and we clean up the pid file when
we're done.
"""
sml = None
if mainOptions is not None and 'pidfilename' in mainOptions:
sml = SimpleMainLock(mainOptions)
otherpid = sml.acquire()
if otherpid is not None:
logger = gplog.get_default_logger()
logger.error("An instance of %s is already running (pid %s)" % (getProgramName(), otherpid))
return
# at this point we have whatever lock we require
try:
simple_main_locked(createOptionParserFn, createCommandFn, mainOptions)
finally:
if sml is not None:
sml.release()
def simple_main_locked(createOptionParserFn, createCommandFn, mainOptions):
"""
Not to be called externally -- use simple_main instead
"""
logger = gplog.get_default_logger()
configurationInterface.registerConfigurationProvider( configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog())
fileSystemInterface.registerFileSystemProvider( fileSystemImplOs.GpFileSystemProviderUsingOs())
osInterface.registerOsProvider( osImplNative.GpOsProviderUsingNative())
faultProberInterface.registerFaultProber( faultProberImplGpdb.GpFaultProberImplGpdb())
commandObject = None
parser = None
forceQuiet = mainOptions is not None and mainOptions.get("forceQuietOutput")
options = None
if mainOptions is not None and mainOptions.get("programNameOverride"):
global gProgramName
gProgramName = mainOptions.get("programNameOverride")
suppressStartupLogMessage = mainOptions is not None and mainOptions.get("suppressStartupLogMessage")
useHelperToolLogging = mainOptions is not None and mainOptions.get("useHelperToolLogging")
nonuser = True if mainOptions is not None and mainOptions.get("setNonuserOnToolLogger") else False
exit_status = 1
# NOTE: if this logic is changed then also change test_main in testUtils.py
try:
execname = getProgramName()
hostname = unix.getLocalHostname()
username = unix.getUserName()
parser = createOptionParserFn()
(options, args) = parser.parse_args()
if useHelperToolLogging:
gplog.setup_helper_tool_logging(execname, hostname, username)
else:
gplog.setup_tool_logging(execname, hostname, username,
logdir=options.ensure_value("logfileDirectory", None), nonuser=nonuser )
if forceQuiet:
gplog.quiet_stdout_logging()
else:
if options.ensure_value("verbose", False):
gplog.enable_verbose_logging()
if options.ensure_value("quiet", False):
gplog.quiet_stdout_logging()
if options.ensure_value("masterDataDirectory", None) is not None:
options.master_data_directory = os.path.abspath(options.masterDataDirectory)
if not suppressStartupLogMessage:
logger.info("Starting %s with args: %s" % (gProgramName, ' '.join(sys.argv[1:])))
commandObject = createCommandFn(options, args)
exitCode = commandObject.run()
exit_status = exitCode
except ProgramArgumentValidationException, e:
if e.shouldPrintHelp():
parser.print_help()
logger.error("%s: error: %s" %(gProgramName, e.getMessage()))
exit_status = 2
except ExceptionNoStackTraceNeeded, e:
logger.error( "%s error: %s" % (gProgramName, e))
exit_status = 2
except UserAbortedException, e:
logger.info("User abort requested, Exiting...")
exit_status = 4
except ExecutionError, e:
logger.fatal("Error occurred: %s\n Command was: '%s'\n"
"rc=%d, stdout='%s', stderr='%s'" %\
(e.summary,e.cmd.cmdStr, e.cmd.results.rc, e.cmd.results.stdout,
e.cmd.results.stderr ))
exit_status = 2
except Exception, e:
if options is None:
logger.exception("%s failed. exiting...", gProgramName)
else:
if options.ensure_value("verbose", False):
logger.exception("%s failed. exiting...", gProgramName)
else:
logger.fatal("%s failed. (Reason='%s') exiting..." % (gProgramName, e))
exit_status = 2
except KeyboardInterrupt:
exit_status = 2
finally:
if commandObject:
commandObject.cleanup()
sys.exit(exit_status)
def addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption, includeUsageOption=False):
"""
Add the standard options for help and logging
to the specified parser object.
"""
parser.set_usage('%prog [--help] [options] ')
parser.remove_option('-h')
addTo = parser
addTo.add_option('-h', '-?', '--help', action='help',
help='show this help message and exit')
if includeUsageOption:
parser.add_option('--usage', action="briefhelp")
addTo = OptionGroup(parser, "Logging Options")
parser.add_option_group(addTo)
addTo.add_option('-v', '--verbose', action='store_true',
help='debug output.')
addTo.add_option('-q', '--quiet', action='store_true',
help='suppress status messages')
addTo.add_option("-l", None, dest="logfileDirectory", metavar="<directory>", type="string",
help="Logfile directory")
if includeNonInteractiveOption:
addTo.add_option('-a', dest="interactive" , action='store_false', default=True,
help="quiet mode, do not require user input for confirmations")
def addMasterDirectoryOptionForSingleClusterProgram(addTo):
"""
Add the -d master directory option to the specified parser object
which is intended to provide the value of the master data directory.
For programs that operate on multiple clusters at once, this function/option
is not appropriate.
"""
addTo.add_option('-d', '--master_data_directory', type='string',
dest="masterDataDirectory",
metavar="<master data directory>",
help="Optional. The master host data directory. If not specified, the value set"\
"for $MASTER_DATA_DIRECTORY will be used.")
#
# YamlMain
#
def get_yaml(targetclass):
"get_yaml"
# doc - class's doc string
# pos - where YAML starts in doc
# ystr - YAML string extracted from doc
if not hasattr(targetclass, '_yaml') or targetclass._yaml is None:
doc = targetclass.__doc__
pos = doc.find('%YAML')
assert pos >= 0, "targetclass doc string is missing %YAML plan"
ystr = doc[pos:].replace('\n ','\n')
targetclass._yaml = yaml.load(ystr)
return targetclass._yaml
class YamlMain:
"YamlMain"
def __init__(self):
"Parse arguments based on yaml docstring"
self.current = None
self.plan = None
self.scenario_name = None
self.logger = None
self.logfilename = None
self.errmsg = None
self.parser = YamlOptions(self).parser
self.options, self.args = self.parser.parse_args()
self.options.quiet = self.options.q
self.options.verbose = self.options.v
#
# simple_main interface
#
def __call__(self, *args):
"Allows us to use self as the create_parser and create_program functions in call to simple_main"
return self
def parse_args(self):
"Called by simple_main to obtain results from parser returned by create_parser"
return self.options, self.args
def run(self):
"Called by simple_main to execute the program returned by create_program"
self.plan = Plan(self)
self.scenario_name = self.plan.name
self.logger = self.plan.logger
self.logfilename = self.plan.logfilename
self.errmsg = self.plan.errmsg
self.current = []
self.plan.run()
def cleanup(self):
"Called by simple_main to cleanup after program returned by create_program finishes"
pass
def simple(self):
"Delegates setup and control to mainUtils.simple_main"
simple_main(self, self)
#
# option parsing
#
class YamlOptions:
"YamlOptions"
def __init__(self, target):
"""
Scan the class doc string of the given object, looking for the %YAML
        containing the option specification. Parse the YAML and set up the
corresponding OptionParser object.
"""
# target - options object (input)
# gname - option group name
self.y = get_yaml(target.__class__)
self.parser = OptionParser( description=self.y['Description'], version='%prog version $Revision$')
self.parser.remove_option('-h')
self.parser.set_usage(self.y['Usage'])
self.opty = self.y['Options']
for gname in self.opty.get('Groups', []):
self._register_group(gname)
def _register_group(self, gname):
"""
Register options for the specified option group name to the OptionParser
using an OptionGroup unless the group name starts with 'Help' in which
case we just register the options with the top level OptionParser object.
"""
# gname - option group name (input)
# gy - option group YAML object
# grp - option group object
# tgt - where to add options (parser or option group)
# optkey - comma separated list of option flags
# optval - help string or dict with detailed option settings
# listargs - list of option flags (e.g. ['-h', '--help'])
# dictargs - key/value arguments to add_option
gy = self.opty.get(gname, None)
if gname.startswith('Help'):
grp = None
tgt = self.parser
else:
grp = OptionGroup(self.parser, gname)
tgt = grp
for optkey, optval in gy.items():
listargs = optkey.split(',')
if type(optval) == type(''):
# short form: optval is just a help string
dictargs = {
'action': 'store_true',
'help': optval
}
else:
# optval is the complete option specification
dictargs = optval
# hide hidden options
if dictargs.get('help','').startswith('hidden'):
dictargs['help'] = SUPPRESS_HELP
#print 'adding', listargs, dictargs
tgt.add_option(*listargs, **dictargs)
if grp is not None:
self.parser.add_option_group(grp)
#
# plan execution
#
class Task:
"Task"
def __init__(self, key, name, subtasks=None):
self.Key = key # task key
self.Name = name # task name
self.SubTasks = subtasks # subtasks, if any
self.Func = None # task function, set by _task
def _print(self, main, prefix):
print '%s %s %s:' % (prefix, self.Key, self.Name)
def _debug(self, main, prefix):
main.logger.debug('Execution Plan:%s %s %s%s' % (prefix, self.Key, self.Name, ':' if self.SubTasks else ''))
def _run(self, main, prefix):
main.logger.debug(' Now Executing:%s %s %s' % (prefix, self.Key, self.Name))
if self.Func:
self.Func()
class Exit(Exception):
def __init__(self, rc, code=None, call_support=False):
Exception.__init__(self)
self.code = code
self.prm = sys._getframe(1).f_locals
self.rc = rc
self.call_support = call_support
class Plan:
"Plan"
def __init__(self, main):
"""
Create cached yaml from class doc string of the given object,
looking for the %YAML indicating the beginning of the object's YAML plan and parse it.
Build the plan stages and tasks for the specified scenario.
"""
# main - object with yaml scenarios (input)
# sy - Stage yaml
self.logger = gplog.get_default_logger()
self.logfilename = gplog.get_logfile()
self.main = main
self.y = get_yaml(main.__class__)
self.name = main.options.scenario
if not self.name:
self.name = self.y['Default Scenario']
self.scenario = self.y['Scenarios'][self.name]
self.errors = self.y['Errors']
self.Tasks = [ self._task(ty) for ty in self.scenario ]
def _task(self, ty):
"Invoked by __init__ to build a top-level task from the YAML"
# ty - Task yaml (input)
# tyk - Task yaml key
# tyv - Task yaml value
# sty - Sub Task yaml
# t - Task (returned)
for tyk, tyv in ty.items():
key, workers = tyk.split(None, 1)
subtasks = [ self._subtask(sty) for sty in tyv ]
t = Task(key, workers, subtasks)
return t
def _subtask(self, sty):
"Invoked by _stage to build a task from the YAML"
# sty - Sub Task yaml (input)
# st - Sub Task (returned)
key, rest = sty.split(None, 1)
st = Task(key, rest)
fn = st.Name.lower().replace(' ','_')
try:
st.Func = getattr(self.main, fn)
except AttributeError, e:
raise Exception("Failed to lookup '%s' for sub task '%s': %s" % (fn, st.Name, str(e)))
return st
def _dotasks(self, subtasks, prefix, action):
"Apply an action to each subtask recursively"
# st - Sub Task
for st in subtasks or []:
self.main.current.append(st)
action(st, self.main, prefix)
self._dotasks(st.SubTasks, ' '+prefix, action)
self.main.current.pop()
def _print(self):
"Print in YAML form."
print '%s:' % self.name
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._print(m,p))
def run(self):
"Run the stages and tasks."
self.logger.debug('Execution Plan: %s' % self.name)
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._debug(m,p))
self.logger.debug(' Now Executing: %s' % self.name)
try:
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._run(m,p))
except Exit, e:
self.exit(e.code, e.prm, e.rc, e.call_support)
def errmsg(self, code, prm={}):
"Return a formatted error message"
return self.errors[code] % prm
def exit(self, code=None, prm={}, rc=1, call_support=False):
"Terminate the application"
if code:
msg = self.errmsg(code, prm)
self.logger.error(msg)
if call_support:
self.logger.error('Please send %s to Greenplum support.' % self.logfilename)
self.logger.debug('exiting with status %(rc)s' % locals())
sys.exit(rc)
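# For reference, the %YAML plan that get_yaml() extracts from a subclass doc
# string has roughly this shape. This is a purely illustrative sketch; every
# tool name, option and scenario below is hypothetical, only the top-level keys
# (Description, Usage, Options, Default Scenario, Scenarios, Errors) come from
# the code above:
#
#     %YAML 1.1
#     ---
#     Description: example tool built on YamlMain
#     Usage: mytool [options]
#     Options:
#       Groups:
#       - Help Options
#       - General Options
#       Help Options:
#         -h,-?,--help:
#           action: help
#           help: show this help message and exit
#       General Options:
#         -v: debug output
#         -q: suppress status messages
#         --scenario:
#           type: string
#           help: name of the scenario to run
#     Default Scenario: Main
#     Scenarios:
#       Main:
#       - 1 Main:
#         - 1.1 Check Environment
#         - 1.2 Do Work
#     Errors:
#       nothing_to_do: 'nothing to do (%(reason)s)'
#
# Sub task names map to methods on the subclass, so "1.1 Check Environment"
# would invoke a check_environment() method when the scenario runs.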
| apache-2.0 | -8,557,602,960,484,487,000 | 34.534375 | 122 | 0.608434 | false |
endlessm/chromium-browser | third_party/catapult/third_party/cloudstorage/cloudstorage/api_utils.py | 11 | 12009 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Util functions and classes for cloudstorage_api."""
__all__ = ['set_default_retry_params',
'RetryParams',
]
import copy
import httplib
import logging
import math
import os
import threading
import time
import urllib
try:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
except ImportError:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
_RETRIABLE_EXCEPTIONS = (urlfetch.DownloadError,
urlfetch_errors.InternalTransientError,
apiproxy_errors.Error,
app_identity.InternalError,
app_identity.BackendDeadlineExceeded)
_thread_local_settings = threading.local()
_thread_local_settings.default_retry_params = None
def set_default_retry_params(retry_params):
"""Set a default RetryParams for current thread current request."""
_thread_local_settings.default_retry_params = copy.copy(retry_params)
def _get_default_retry_params():
"""Get default RetryParams for current request and current thread.
Returns:
A new instance of the default RetryParams.
"""
default = getattr(_thread_local_settings, 'default_retry_params', None)
if default is None or not default.belong_to_current_request():
return RetryParams()
else:
return copy.copy(default)
def _quote_filename(filename):
"""Quotes filename to use as a valid URI path.
Args:
filename: user provided filename. /bucket/filename.
Returns:
The filename properly quoted to use as URI's path component.
"""
return urllib.quote(filename)
def _unquote_filename(filename):
"""Unquotes a valid URI path back to its filename.
This is the opposite of _quote_filename.
Args:
filename: a quoted filename. /bucket/some%20filename.
Returns:
The filename unquoted.
"""
return urllib.unquote(filename)
def _should_retry(resp):
"""Given a urlfetch response, decide whether to retry that request."""
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600))
class _RetryWrapper(object):
"""A wrapper that wraps retry logic around any tasklet."""
def __init__(self,
retry_params,
retriable_exceptions=_RETRIABLE_EXCEPTIONS,
should_retry=lambda r: False):
"""Init.
Args:
      retry_params: a RetryParams instance.
retriable_exceptions: a list of exception classes that are retriable.
should_retry: a function that takes a result from the tasklet and returns
a boolean. True if the result should be retried.
"""
self.retry_params = retry_params
self.retriable_exceptions = retriable_exceptions
self.should_retry = should_retry
@ndb.tasklet
def run(self, tasklet, **kwds):
"""Run a tasklet with retry.
The retry should be transparent to the caller: if no results
are successful, the exception or result from the last retry is returned
to the caller.
Args:
tasklet: the tasklet to run.
**kwds: keywords arguments to run the tasklet.
Raises:
The exception from running the tasklet.
Returns:
The result from running the tasklet.
"""
start_time = time.time()
n = 1
while True:
e = None
result = None
got_result = False
try:
result = yield tasklet(**kwds)
got_result = True
if not self.should_retry(result):
raise ndb.Return(result)
except runtime.DeadlineExceededError:
logging.debug(
'Tasklet has exceeded request deadline after %s seconds total',
time.time() - start_time)
raise
except self.retriable_exceptions, e:
pass
if n == 1:
logging.debug('Tasklet is %r', tasklet)
delay = self.retry_params.delay(n, start_time)
if delay <= 0:
logging.debug(
'Tasklet failed after %s attempts and %s seconds in total',
n, time.time() - start_time)
if got_result:
raise ndb.Return(result)
elif e is not None:
raise e
else:
assert False, 'Should never reach here.'
if got_result:
logging.debug(
'Got result %r from tasklet.', result)
else:
logging.debug(
'Got exception "%r" from tasklet.', e)
logging.debug('Retry in %s seconds.', delay)
n += 1
yield tasklets.sleep(delay)
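# Illustrative sketch of how the wrapper is typically driven (the tasklet and
# its keyword arguments below are hypothetical, not part of this module):
#
#   wrapper = _RetryWrapper(RetryParams(),
#                           retriable_exceptions=_RETRIABLE_EXCEPTIONS,
#                           should_retry=_should_retry)
#   result_future = wrapper.run(some_urlfetch_tasklet, url='...', deadline=30)
#   result = result_future.get_result()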
class RetryParams(object):
"""Retry configuration parameters."""
_DEFAULT_USER_AGENT = 'App Engine Python GCS Client'
@datastore_rpc._positional(1)
def __init__(self,
backoff_factor=2.0,
initial_delay=0.1,
max_delay=10.0,
min_retries=3,
max_retries=6,
max_retry_period=30.0,
urlfetch_timeout=None,
save_access_token=False,
_user_agent=None,
memcache_access_token=True):
"""Init.
This object is unique per request per thread.
Library will retry according to this setting when App Engine Server
can't call urlfetch, urlfetch timed out, or urlfetch got a 408 or
500-600 response.
Args:
backoff_factor: exponential backoff multiplier.
initial_delay: seconds to delay for the first retry.
max_delay: max seconds to delay for every retry.
min_retries: min number of times to retry. This value is automatically
capped by max_retries.
max_retries: max number of times to retry. Set this to 0 for no retry.
max_retry_period: max total seconds spent on retry. Retry stops when
this period passed AND min_retries has been attempted.
urlfetch_timeout: timeout for urlfetch in seconds. Could be None,
in which case the value will be chosen by urlfetch module.
save_access_token: persist access token to datastore to avoid
excessive usage of GetAccessToken API. In addition to this, the token
will be cached in process, and may also be cached in memcache (see
memcache_access_token param). However, storing in Datastore can still
be useful in the event that memcache is unavailable.
_user_agent: The user agent string that you want to use in your requests.
memcache_access_token: cache access token in memcache to avoid excessive
usage of GetAccessToken API.
"""
self.backoff_factor = self._check('backoff_factor', backoff_factor)
self.initial_delay = self._check('initial_delay', initial_delay)
self.max_delay = self._check('max_delay', max_delay)
self.max_retry_period = self._check('max_retry_period', max_retry_period)
self.max_retries = self._check('max_retries', max_retries, True, int)
self.min_retries = self._check('min_retries', min_retries, True, int)
if self.min_retries > self.max_retries:
self.min_retries = self.max_retries
self.urlfetch_timeout = None
if urlfetch_timeout is not None:
self.urlfetch_timeout = self._check('urlfetch_timeout', urlfetch_timeout)
self.save_access_token = self._check('save_access_token', save_access_token,
True, bool)
self.memcache_access_token = self._check('memcache_access_token',
memcache_access_token,
True,
bool)
self._user_agent = _user_agent or self._DEFAULT_USER_AGENT
self._request_id = os.getenv('REQUEST_LOG_ID')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val
def belong_to_current_request(self):
return os.getenv('REQUEST_LOG_ID') == self._request_id
def delay(self, n, start_time):
"""Calculate delay before the next retry.
Args:
n: the number of current attempt. The first attempt should be 1.
start_time: the time when retry started in unix time.
Returns:
Number of seconds to wait before next retry. -1 if retry should give up.
"""
if (n > self.max_retries or
(n > self.min_retries and
time.time() - start_time > self.max_retry_period)):
return -1
return min(
math.pow(self.backoff_factor, n-1) * self.initial_delay,
self.max_delay)
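# Worked example of the backoff above (using the default values, chosen here
# purely for illustration): with backoff_factor=2.0, initial_delay=0.1 and
# max_delay=10.0, delay() returns roughly 0.1, 0.2, 0.4, 0.8, ... seconds for
# attempts 1, 2, 3, 4, capped at 10.0, and -1 once max_retries or
# max_retry_period is exhausted.  A typical setup might look like:
#
#   params = RetryParams(initial_delay=0.2, max_retries=8, urlfetch_timeout=30)
#   set_default_retry_params(params)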
def _run_until_rpc():
"""Eagerly evaluate tasklets until it is blocking on some RPC.
Usually ndb eventloop el isn't run until some code calls future.get_result().
When an async tasklet is called, the tasklet wrapper evaluates the tasklet
code into a generator, enqueues a callback _help_tasklet_along onto
the el.current queue, and returns a future.
_help_tasklet_along, when called by the el, will
  get one yielded value from the generator. If the value is another future,
  set up a callback _on_future_complete to invoke _help_tasklet_along
  when the dependent future fulfills. If the value is an RPC, set up a
  callback _on_rpc_complete to invoke _help_tasklet_along when the RPC fulfills.
  Thus _help_tasklet_along drills down
  the chain of futures until some future is blocked by RPC. El runs
  all callbacks and constantly checks pending RPC status.
"""
el = eventloop.get_event_loop()
while el.current:
el.run0()
def _eager_tasklet(tasklet):
"""Decorator to turn tasklet to run eagerly."""
@utils.wrapping(tasklet)
def eager_wrapper(*args, **kwds):
fut = tasklet(*args, **kwds)
_run_until_rpc()
return fut
return eager_wrapper
| bsd-3-clause | -4,413,525,626,268,419,000 | 31.991758 | 80 | 0.662586 | false |
kdani3/searx | searx/engines/bing_images.py | 3 | 2634 | """
Bing (Images)
@website https://www.bing.com/images
@provide-api yes (http://datamarket.azure.com/dataset/bing/search),
max. 5000 query/month
@using-api no (because of query limit)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, img_src
 @todo currently up to 35 images are received per page,
       because bing does not parse count=10.
       the response is limited to 10 images
"""
from urllib import urlencode
from lxml import html
from yaml import load
import re
# engine dependent config
categories = ['images']
paging = True
safesearch = True
# search-url
base_url = 'https://www.bing.com/'
search_string = 'images/search?{query}&count=10&first={offset}'
thumb_url = "https://www.bing.com/th?id={ihk}"
# safesearch definitions
safesearch_types = {2: 'STRICT',
1: 'DEMOTE',
0: 'OFF'}
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
# required for cookie
if params['language'] == 'all':
language = 'en-US'
else:
language = params['language'].replace('_', '-')
search_path = search_string.format(
query=urlencode({'q': query}),
offset=offset)
params['cookies']['SRCHHPGUSR'] = \
'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0] +\
'&ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
params['url'] = base_url + search_path
return params
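# For illustration (the query and page number are arbitrary examples): a request
# for 'test kittens' on page 1 yields offset=1 and
#   params['url'] == 'https://www.bing.com/images/search?q=test+kittens&count=10&first=1'
# while the SRCHHPGUSR cookie carries the language and safesearch settings.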
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.content)
# init regex for yaml-parsing
p = re.compile('({|,)([a-z]+):(")')
# parse results
for result in dom.xpath('//div[@class="dg_u"]'):
link = result.xpath('./a')[0]
# parse yaml-data (it is required to add a space, to make it parsable)
yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
title = link.attrib.get('t1')
ihk = link.attrib.get('ihk')
# url = 'http://' + link.attrib.get('t3')
url = yaml_data.get('surl')
img_src = yaml_data.get('imgurl')
# append result
results.append({'template': 'images.html',
'url': url,
'title': title,
'content': '',
'thumbnail_src': thumb_url.format(ihk=ihk),
'img_src': img_src})
# TODO stop parsing if 10 images are found
if len(results) >= 10:
break
# return results
return results
| agpl-3.0 | -2,446,826,301,775,577,600 | 25.877551 | 78 | 0.563781 | false |
cjhak/b2share | invenio/modules/communities/signals.py | 14 | 3231 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
User community signals - useful for hooking into the community
creation process.
"""
from blinker import Namespace
_signals = Namespace()
before_save_collection = _signals.signal('before-save-collection')
"""
This signal is sent right before a collection is saved.
Sender is the community. Extra data passed is:
* is_new
* provisional
"""
after_save_collection = _signals.signal('after-save-collection')
"""
This signal is sent right after a collection is saved.
Sender is the community. Extra data passed is:
* collection
* provisional
"""
before_save_collections = _signals.signal('before-save-collections')
"""
This signal is sent right before all collections are saved.
Sender is the community.
"""
after_save_collections = _signals.signal('after-save-collections')
"""
This signal is sent right after all collections are saved.
Sender is the community.
"""
before_delete_collection = _signals.signal('before-delete-collection')
"""
This signal is sent right before a collection is deleted.
Sender is the community. Extra data passed is:
* collection
* provisional
"""
after_delete_collection = _signals.signal('after-delete-collection')
"""
This signal is sent right after a collection is deleted.
Sender is the community. Extra data passed is:
* provisional
"""
before_delete_collections = _signals.signal('before-delete-collections')
"""
This signal is sent right before all collections are deleted.
Sender is the community.
"""
after_delete_collections = _signals.signal('after-delete-collections')
"""
This signal is sent right after all collections are deleted.
Sender is the community.
"""
pre_curation = _signals.signal('pre-curation')
"""
This signal is sent right before a record is accepted or rejected.
Sender is the user community. Extra data passed is:
* action: accept or reject
* recid: Record ID
* pretend: True if record changes are not actually persisted
"""
post_curation = _signals.signal('post-curation')
"""
This signal is sent right after a record is accepted or rejected.
Sender is the user community.
* action: accept or reject
* recid: Record ID
* record: Record which was uploaded
* pretend: True if record changes are not actually persisted
Note: the record which was accepted/rejected is most likely not updated
yet in the database, since bibupload has to run first.
"""
curate_record = _signals.signal('curate-record')
"""
This signal is sent right before the curation process removes a record.
"""
| gpl-2.0 | 8,069,635,877,204,214,000 | 26.853448 | 74 | 0.749304 | false |
getstackd/stackd | vendor/boost-context/tools/build/v2/test/static_and_shared_library.py | 44 | 1094 | #!/usr/bin/python
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2003, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", "")
t.write("lib/c.cpp", "int bar() { return 0; }\n")
t.write("lib/jamfile.jam", """\
static-lib auxilliary1 : c.cpp ;
lib auxilliary2 : c.cpp ;
""")
def reset():
t.rm("lib/bin")
t.run_build_system(subdir='lib')
t.expect_addition("lib/bin/$toolset/debug/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=shared"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=static"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug/link-static/" * BoostBuild.List(
"c.obj auxilliary1.lib auxilliary2.lib"))
t.expect_nothing_more()
t.cleanup()
| mit | -6,804,140,613,158,794,000 | 27.789474 | 81 | 0.699269 | false |
indhub/mxnet | tools/coreml/converter/_mxnet_converter.py | 41 | 8850 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import _layers
import coremltools as _coremltools
import coremltools.models.datatypes as _datatypes
from coremltools.models import neural_network as _neural_network
import json as _json
import mxnet as _mxnet
import numpy as _np
_MXNET_LAYER_REGISTRY = {
'FullyConnected' : _layers.convert_dense,
'Activation' : _layers.convert_activation,
'SoftmaxOutput' : _layers.convert_softmax,
'Convolution' : _layers.convert_convolution,
'Pooling' : _layers.convert_pooling,
'Flatten' : _layers.convert_flatten,
'transpose' : _layers.convert_transpose,
'Concat' : _layers.convert_concat,
'BatchNorm' : _layers.convert_batchnorm,
'elemwise_add' : _layers.convert_elementwise_add,
'Reshape' : _layers.convert_reshape,
'Deconvolution' : _layers.convert_deconvolution,
}
_MXNET_SKIP_LAYERS = [
'_MulScalar',
'Dropout',
]
def _mxnet_remove_batch(input_data):
for blob in input_data:
input_data[blob] = _np.reshape(input_data[blob], input_data[blob].shape[1:])
return input_data
def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
"""
Check the difference between predictions from MXNet and CoreML.
"""
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_copy = {}
for ip in shapes:
input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
input_data_copy[ip] = _np.copy(input_data[ip])
dataIter = _mxnet.io.NDArrayIter(input_data_copy)
mx_out = model.predict(dataIter).flatten()
e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
e_out = e_out_dict[output].flatten()
error = _np.linalg.norm(e_out - mx_out)
if verbose:
print "First few predictions from CoreML : %s" % e_out[0:10]
print "First few predictions from MXNet : %s" % e_out[0:10]
print "L2 Error on random data %s" % error
return error
def _set_input_output_layers(builder, input_names, output_names):
input_layers_indices = []
output_layers_indices = []
layers = builder.spec.neuralNetwork.layers
for idx, l in enumerate(layers):
if set(input_names).intersection(l.input):
input_layers_indices.append(idx)
if set(output_names).intersection(l.output):
output_layers_indices.append(idx)
builder.input_layers_indices = input_layers_indices
builder.output_layers_indices = output_layers_indices
builder.input_layers_is1d = [False for _ in input_names]
builder.output_layers_is1d = [False for _ in output_names]
def _get_layer_converter_fn(layer):
"""Get the right converter function for MXNet
"""
if layer in _MXNET_LAYER_REGISTRY:
return _MXNET_LAYER_REGISTRY[layer]
else:
raise TypeError("MXNet layer of type %s is not supported." % layer)
def convert(model, input_shape, order = None, class_labels = None, mode = None, preprocessor_args = None):
"""Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
order: Order of inputs
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
    input_shape: dict
        A dictionary mapping each input name to its shape (one entry per
        network input).
    preprocessor_args: dict
        Optional pre-processing arguments supplied as a dictionary object. The
        parameters in the dictionary tell the converted coreml model how to
        pre-process any input before an inference is run on it.
        For the list of pre-processing arguments see
        http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model.
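
    Example
    -------
    A purely illustrative call (the input name, shape, label file and
    pre-processing keys are assumptions, not requirements of this module):

        coreml_model = convert(mxnet_model,
                               input_shape={'data': (1, 3, 224, 224)},
                               mode='classifier',
                               class_labels='synset.txt',
                               preprocessor_args={'image_input_names': ['data']})
        coreml_model.save('model.mlmodel')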
"""
if not isinstance(input_shape, dict):
raise TypeError("Must provide a dictionary for input shape. e.g input_shape={'data':(3,224,224)}")
def remove_batch(dim):
return dim[1:]
if order is None:
input_names = input_shape.keys()
input_dims = map(remove_batch, input_shape.values())
else:
names = input_shape.keys()
shapes = map(remove_batch, input_shape.values())
input_names = [names[i] for i in order]
input_dims = [shapes[i] for i in order]
net = model.symbol
# Infer shapes and store in a dictionary
shapes = net.infer_shape(**input_shape)
arg_names = net.list_arguments()
output_names = net.list_outputs()
aux_names = net.list_auxiliary_states()
shape_dict = {}
for idx, op in enumerate(arg_names):
shape_dict[op] = shapes[0][idx]
for idx, op in enumerate(output_names):
shape_dict[op] = shapes[1][idx]
for idx, op in enumerate(aux_names):
shape_dict[op] = shapes[2][idx]
# Get the inputs and outputs
output_dims = shapes[1]
input_types = [_datatypes.Array(*dim) for dim in input_dims]
output_types = [_datatypes.Array(*dim) for dim in output_dims]
# Make the builder
input_features = zip(input_names, input_types)
output_features = zip(output_names, output_types)
builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
# Get out the layers
net = _json.loads(net.tojson())
nodes = net['nodes']
for i, node in enumerate(nodes):
node['id'] = i
if node['name'] in shape_dict:
node['shape'] = shape_dict[node['name']]
node['outputs'] = []
if 'inputs' in node:
for ip in node['inputs']:
nodes[ip[0]]['outputs'].append([i, 0])
else:
node['inputs'] = []
# Mark the head nodes
for head in net['heads']:
head_id = head[0]
head_node = nodes[head_id]
head_node['outputs'] = [head]
head_node['name'] += "_output"
head_node['shape'] = shape_dict[head_node['name']]
# For skipped layers, make sure nodes are modified
for node in nodes:
op = node['op']
inputs = node['inputs']
outputs = node['outputs']
if op in _MXNET_SKIP_LAYERS:
nodes[inputs[0][0]]['outputs'][0] = outputs[0]
nodes[outputs[0][0]]['inputs'][0] = inputs[0]
# Find the input and output names for this node
for idx, node in enumerate(nodes):
op = node['op']
if op == 'null' or op in _MXNET_SKIP_LAYERS:
continue
name = node['name']
print("%d : %s, %s" % (idx, name, op))
converter_func = _get_layer_converter_fn(op)
converter_func(net, node, model, builder)
# Set the right inputs and outputs
_set_input_output_layers(builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels))
builder.set_class_labels(class_labels = labels)
# Return the model
    return _coremltools.models.MLModel(builder.spec)
| apache-2.0 | -6,223,411,545,413,807,000 | 37.316017 | 181 | 0.652655 | false |
apache/incubator-singa | tool/opencl/clsrc_to_str.py | 3 | 3166 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''Extract OpenCL source code into C++ strings, for runtime use.
This file is executed only if .cl files are updated.
It is executed in the ROOT folder of SINGA source repo.
'''
from future.utils import iteritems
distribution = "./src/core/tensor/distribution.cl"
tensormath = "./src/core/tensor/tensor_math_opencl.cl"
im2col = "./src/model/layer/im2col.cl"
pooling = "./src/model/layer/pooling.cl"
files = {"distribution_str": distribution, "tensormath_str": tensormath,
"im2col_str": im2col, "pooling_str": pooling}
if __name__ == "__main__":
fullpath = './src/core/device/opencl_func.h'
with open(fullpath, 'w') as fout:
fout.write("// This file is auto-generated by tool/opencl/clsrc_to_str."
" do not edit manually.\n")
license = """
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
fout.write(license)
fout.write("#ifdef USE_OPENCL\n\n")
fout.write("#include <string>\n\n")
fout.write("namespace singa {\n namespace opencl {\n")
for name, path in iteritems(files):
with open(path, 'r') as fin:
src = fin.read()
src = repr(src)
src = src[1:-1]
src = src.replace('\"', '\\"') # Escape double quotes
src = src.replace('\\t', '') # Strip out tabs
fout.write("const std::string " + name + " = \"")
fout.write(src)
fout.write("\";")
fout.write("\n } // namespace opencl \n} // namespace singa\n\n")
fout.write("#endif")
fout.close()
| apache-2.0 | 8,641,011,227,584,171,000 | 41.213333 | 80 | 0.664561 | false |
AndKe/MAVProxy | MAVProxy/modules/mavproxy_mmap/mmap_server.py | 6 | 1971 | import BaseHTTPServer
import json
import os.path
import thread
import urlparse
DOC_DIR = os.path.join(os.path.dirname(__file__), 'mmap_app')
class Server(BaseHTTPServer.HTTPServer):
def __init__(self, handler, address='', port=9999, module_state=None):
BaseHTTPServer.HTTPServer.__init__(self, (address, port), handler)
self.allow_reuse_address = True
self.module_state = module_state
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
scheme, host, path, params, query, frag = urlparse.urlparse(self.path)
if path == '/data':
state = self.server.module_state
data = {'lat': state.lat,
'lon': state.lon,
'heading': state.heading,
'alt': state.alt,
'airspeed': state.airspeed,
'groundspeed': state.groundspeed}
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(data))
else:
# Remove leading '/'.
path = path[1:]
# Ignore all directories. E.g. for ../../bar/a.txt serve
# DOC_DIR/a.txt.
unused_head, path = os.path.split(path)
# for / serve index.html.
if path == '':
path = 'index.html'
content = None
error = None
try:
import pkg_resources
name = __name__
if name == "__main__":
name = "MAVProxy.modules.mavproxy_mmap.????"
content = pkg_resources.resource_stream(name, "mmap_app/%s" % path).read()
except IOError as e:
error = str(e)
if content:
self.send_response(200)
self.end_headers()
self.wfile.write(content)
else:
self.send_response(404)
self.end_headers()
self.wfile.write('Error: %s' % (error,))
def start_server(address, port, module_state):
server = Server(
Handler, address=address, port=port, module_state=module_state)
thread.start_new_thread(server.serve_forever, ())
return server
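# Illustrative usage sketch (the state object is hypothetical; anything with
# lat/lon/heading/alt/airspeed/groundspeed attributes will do):
#
#   server = start_server('127.0.0.1', 9999, module_state=some_state)
#   # GET http://127.0.0.1:9999/data then returns JSON such as
#   # {"lat": ..., "lon": ..., "heading": ..., "alt": ...,
#   #  "airspeed": ..., "groundspeed": ...}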
| gpl-3.0 | 6,527,972,530,615,412,000 | 29.796875 | 82 | 0.601218 | false |
udacity/deep-learning | language-translation/problem_unittests.py | 1 | 20559 | import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import itertools
import collections
import helper
def _print_success_message():
print('Tests Passed')
def test_text_to_ids(text_to_ids):
test_source_text = 'new jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .'
test_target_text = 'new jersey est parfois calme pendant l\' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .'
test_source_text = test_source_text.lower()
test_target_text = test_target_text.lower()
source_vocab_to_int, source_int_to_vocab = helper.create_lookup_tables(test_source_text)
target_vocab_to_int, target_int_to_vocab = helper.create_lookup_tables(test_target_text)
test_source_id_seq, test_target_id_seq = text_to_ids(test_source_text, test_target_text, source_vocab_to_int, target_vocab_to_int)
assert len(test_source_id_seq) == len(test_source_text.split('\n')),\
'source_id_text has wrong length, it should be {}.'.format(len(test_source_text.split('\n')))
assert len(test_target_id_seq) == len(test_target_text.split('\n')), \
'target_id_text has wrong length, it should be {}.'.format(len(test_target_text.split('\n')))
target_not_iter = [type(x) for x in test_source_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter,\
        'Element in source_id_text is not iterable. Found type {}'.format(target_not_iter[0])
target_not_iter = [type(x) for x in test_target_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter, \
        'Element in target_id_text is not iterable. Found type {}'.format(target_not_iter[0])
source_changed_length = [(words, word_ids)
for words, word_ids in zip(test_source_text.split('\n'), test_source_id_seq)
if len(words.split()) != len(word_ids)]
assert not source_changed_length,\
'Source text changed in size from {} word(s) to {} id(s): {}'.format(
len(source_changed_length[0][0].split()), len(source_changed_length[0][1]), source_changed_length[0][1])
target_missing_end = [word_ids for word_ids in test_target_id_seq if word_ids[-1] != target_vocab_to_int['<EOS>']]
assert not target_missing_end,\
'Missing <EOS> id at the end of {}'.format(target_missing_end[0])
target_bad_size = [(words.split(), word_ids)
for words, word_ids in zip(test_target_text.split('\n'), test_target_id_seq)
if len(word_ids) != len(words.split()) + 1]
assert not target_bad_size,\
'Target text incorrect size. {} should be length {}'.format(
target_bad_size[0][1], len(target_bad_size[0][0]) + 1)
source_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_source_text.split('\n') for word in sentence.split()],
itertools.chain.from_iterable(test_source_id_seq))
if source_vocab_to_int[word] != word_id]
assert not source_bad_id,\
'Source word incorrectly converted from {} to id {}.'.format(source_bad_id[0][0], source_bad_id[0][1])
target_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_target_text.split('\n') for word in sentence.split()],
[word_id for word_ids in test_target_id_seq for word_id in word_ids[:-1]])
if target_vocab_to_int[word] != word_id]
assert not target_bad_id,\
'Target word incorrectly converted from {} to id {}.'.format(target_bad_id[0][0], target_bad_id[0][1])
_print_success_message()
def test_model_inputs(model_inputs):
with tf.Graph().as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input is not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets is not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate is not a Placeholder.'
assert keep_prob.op.type == 'Placeholder', \
'Keep Probability is not a Placeholder.'
assert target_sequence_length.op.type == 'Placeholder', \
'Target Sequence Length is not a Placeholder.'
assert max_target_sequence_length.op.type == 'Max', \
'Max Target Sequence Length is not a Max type.'
assert source_sequence_length.op.type == 'Placeholder', \
'Source Sequence Length is not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
assert target_sequence_length.name == 'target_sequence_length:0',\
'Target Sequence Length has bad name. Found name {}'.format(target_sequence_length.name)
assert source_sequence_length.name == 'source_sequence_length:0',\
'Source Sequence Length has bad name. Found name {}'.format(source_sequence_length.name)
assert keep_prob.name == 'keep_prob:0', \
'Keep Probability has bad name. Found name {}'.format(keep_prob.name)
assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')
assert tf.assert_rank(target_sequence_length, 1, message='Target Sequence Length has wrong rank')
assert tf.assert_rank(max_target_sequence_length, 0, message='Max Target Sequence Length has wrong rank')
        assert tf.assert_rank(source_sequence_length, 1, message='Source Sequence Length has wrong rank')
_print_success_message()
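# --- Editor's addition: a hedged sketch (TF 1.x API assumed), not the project's solution. ---
# It shows one model_inputs() layout that satisfies the name/type/rank checks above;
# the _example_ prefix marks it as an illustrative helper invented here.
def _example_model_inputs():
    import tensorflow as tf
    input_data = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    target_sequence_length = tf.placeholder(tf.int32, [None], name='target_sequence_length')
    # reduce_max produces the 'Max' op type the test asserts on.
    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
    source_sequence_length = tf.placeholder(tf.int32, [None], name='source_sequence_length')
    return (input_data, targets, lr, keep_prob, target_sequence_length,
            max_target_sequence_length, source_sequence_length)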
def test_encoding_layer(encoding_layer):
rnn_size = 512
batch_size = 64
num_layers = 3
source_sequence_len = 22
source_vocab_size = 20
encoding_embedding_size = 30
with tf.Graph().as_default():
rnn_inputs = tf.placeholder(tf.int32, [batch_size,
source_sequence_len])
source_sequence_length = tf.placeholder(tf.int32,
(None,),
name='source_sequence_length')
keep_prob = tf.placeholder(tf.float32)
enc_output, states = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size)
assert len(states) == num_layers,\
'Found {} state(s). It should be {} states.'.format(len(states), num_layers)
bad_types = [type(state) for state in states if not isinstance(state, tf.contrib.rnn.LSTMStateTuple)]
assert not bad_types,\
'Found wrong type: {}'.format(bad_types[0])
bad_shapes = [state_tensor.get_shape()
for state in states
for state_tensor in state
if state_tensor.get_shape().as_list() not in [[None, rnn_size], [batch_size, rnn_size]]]
assert not bad_shapes,\
'Found wrong shape: {}'.format(bad_shapes[0])
_print_success_message()
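# --- Editor's addition: a hedged sketch (TF 1.x contrib API assumed), not the project's solution. ---
# One way to satisfy test_encoding_layer(): embed the input ids, then run a stacked LSTM
# with input dropout through tf.nn.dynamic_rnn, returning (outputs, final states).
def _example_encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
                            source_sequence_length, source_vocab_size,
                            encoding_embedding_size):
    import tensorflow as tf
    embed = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size,
                                             encoding_embedding_size)
    def make_cell():
        # Each layer is an LSTMCell wrapped with dropout, matching the LSTMStateTuple check above.
        cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                       initializer=tf.random_uniform_initializer(-0.1, 0.1))
        return tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
    stacked_cell = tf.contrib.rnn.MultiRNNCell([make_cell() for _ in range(num_layers)])
    enc_output, enc_state = tf.nn.dynamic_rnn(stacked_cell, embed,
                                              sequence_length=source_sequence_length,
                                              dtype=tf.float32)
    return enc_output, enc_state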
def test_decoding_layer(decoding_layer):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
dec_input = tf.placeholder(tf.int32, [batch_size, sequence_length])
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
encoder_state = (state, state, state)
train_decoder_output, infer_logits_output = decoding_layer( dec_input,
encoder_state,
target_sequence_length_p,
max_target_sequence_length,
rnn_size,
num_layers,
target_vocab_to_int,
vocab_size,
batch_size,
keep_prob,
embedding_size)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
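# Editor's note (added comment): per the assertions above, decoding_layer() must return a
# (training, inference) pair of tf.contrib.seq2seq.BasicDecoderOutput objects whose
# rnn_output / sample_id static shapes are [batch_size, None, vocab_size] and
# [batch_size, None] respectively.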
def test_seq2seq_model(seq2seq_model):
batch_size = 64
vocab_size = 300
embedding_size = 100
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
dec_input = tf.placeholder(tf.int32, [batch_size, sequence_length])
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
enc_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
target_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
keep_prob = tf.placeholder(tf.float32)
source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
train_decoder_output, infer_logits_output = seq2seq_model( input_data,
target_data,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length_p,
max_target_sequence_length,
vocab_size,
vocab_size,
embedding_size,
embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
# input_data, target_data, keep_prob, batch_size, sequence_length,
# 200, target_vocab_size, 64, 80, rnn_size, num_layers, target_vocab_to_int)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
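# Editor's note (added comment): test_seq2seq_model() only checks that the encoder and
# decoder are wired together end to end -- the returned pair must have the same
# BasicDecoderOutput types and shapes as in test_decoding_layer() above.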
def test_sentence_to_seq(sentence_to_seq):
sentence = 'this is a test sentence'
vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}
output = sentence_to_seq(sentence, vocab_to_int)
assert len(output) == 5,\
'Wrong length. Found a length of {}'.format(len(output))
assert output[3] == 2,\
'Missing <UNK> id.'
assert np.array_equal(output, [3, 6, 5, 2, 4]),\
        'Incorrect output. Found {}'.format(output)
_print_success_message()
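# --- Editor's addition: a hedged reference sketch, not part of the original test file. ---
# A minimal sentence_to_seq() consistent with the checks above: lower-case the sentence
# and fall back to the <UNK> id for words missing from the vocabulary.
def _example_sentence_to_seq(sentence, vocab_to_int):
    return [vocab_to_int.get(word, vocab_to_int['<UNK>'])
            for word in sentence.lower().split()]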
def test_process_encoding_input(process_encoding_input):
batch_size = 2
seq_length = 3
target_vocab_to_int = {'<GO>': 3}
with tf.Graph().as_default():
target_data = tf.placeholder(tf.int32, [batch_size, seq_length])
dec_input = process_encoding_input(target_data, target_vocab_to_int, batch_size)
assert dec_input.get_shape() == (batch_size, seq_length),\
'Wrong shape returned. Found {}'.format(dec_input.get_shape())
test_target_data = [[10, 20, 30], [40, 18, 23]]
with tf.Session() as sess:
test_dec_input = sess.run(dec_input, {target_data: test_target_data})
assert test_dec_input[0][0] == target_vocab_to_int['<GO>'] and\
test_dec_input[1][0] == target_vocab_to_int['<GO>'],\
'Missing GO Id.'
_print_success_message()
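# --- Editor's addition: a hedged sketch (TF 1.x API assumed), not the project's solution. ---
# One way to satisfy test_process_encoding_input(): drop the last word id of every batch
# row and prepend the <GO> id, keeping the (batch_size, seq_length) shape.
def _example_process_encoding_input(target_data, target_vocab_to_int, batch_size):
    import tensorflow as tf
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    go_column = tf.fill([batch_size, 1], target_vocab_to_int['<GO>'])
    return tf.concat([go_column, ending], 1)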
def test_decoding_layer_train(decoding_layer_train):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
# dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
keep_prob = tf.placeholder(tf.float32)
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
for layer in range(num_layers):
with tf.variable_scope('decoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
dec_cell = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob=keep_prob)
output_layer = Dense(vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
name='output_layer')
# output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
encoder_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
train_decoder_output = decoding_layer_train(encoder_state, dec_cell,
dec_embed_input,
target_sequence_length_p,
max_target_sequence_length,
output_layer,
keep_prob)
# encoder_state, dec_cell, dec_embed_input, sequence_length,
# decoding_scope, output_fn, keep_prob)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
_print_success_message()
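# --- Editor's addition: a hedged sketch (TF 1.x contrib.seq2seq API assumed). ---
# A minimal decoding_layer_train() matching the call above: a TrainingHelper feeds the
# ground-truth embeddings to a BasicDecoder; dynamic_decode's first return value is the
# BasicDecoderOutput the test inspects. keep_prob is unused here because dropout is
# already applied inside the cell built by the test.
def _example_decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                                  target_sequence_length, max_summary_length,
                                  output_layer, keep_prob):
    import tensorflow as tf
    helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
                                               sequence_length=target_sequence_length)
    decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer)
    return tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True,
                                             maximum_iterations=max_summary_length)[0]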
def test_decoding_layer_infer(decoding_layer_infer):
batch_size = 64
vocab_size = 1000
sequence_length = 22
embedding_size = 200
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
dec_embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_size]))
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
keep_prob = tf.placeholder(tf.float32)
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
for layer in range(num_layers):
with tf.variable_scope('decoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
dec_cell = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob=keep_prob)
output_layer = Dense(vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
name='output_layer')
# output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
encoder_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
infer_logits_output = decoding_layer_infer( encoder_state,
dec_cell,
dec_embeddings,
1,
2,
max_target_sequence_length,
vocab_size,
output_layer,
batch_size,
keep_prob)
# encoder_state, dec_cell, dec_embeddings, 10, 20,
# sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
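# --- Editor's addition: a hedged sketch (TF 1.x contrib.seq2seq API assumed). ---
# A minimal decoding_layer_infer() matching the call above: greedy decoding that starts
# every batch row from the start-of-sequence id and stops at the end-of-sequence id.
def _example_decoding_layer_infer(encoder_state, dec_cell, dec_embeddings,
                                  start_of_sequence_id, end_of_sequence_id,
                                  max_target_sequence_length, vocab_size,
                                  output_layer, batch_size, keep_prob):
    import tensorflow as tf
    start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32),
                           [batch_size], name='start_tokens')
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_tokens,
                                                      end_of_sequence_id)
    decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer)
    return tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True,
                                             maximum_iterations=max_target_sequence_length)[0]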
| mit | -374,803,904,249,020,350 | 48.753027 | 357 | 0.564726 | false |
runtimejs/runtime | deps/v8/tools/release/merge_to_branch.py | 13 | 10549 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from collections import OrderedDict
import sys
from common_includes import *
def IsSvnNumber(rev):
return rev.isdigit() and len(rev) < 8
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
if self._options.force:
os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
elif self._options.step == 0: # pragma: no cover
self.Die("A merge is already in progress")
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
if self._options.branch:
self["merge_to_branch"] = self._options.branch
else: # pragma: no cover
self.Die("Please specify a branch to merge to")
self.CommonPrepare()
self.PrepareBranch()
class CreateBranch(Step):
MESSAGE = "Create a fresh branch for the patch."
def RunStep(self):
self.GitCreateBranch(self.Config("BRANCHNAME"),
self.vc.RemoteBranch(self["merge_to_branch"]))
class SearchArchitecturePorts(Step):
MESSAGE = "Search for corresponding architecture ports."
def RunStep(self):
self["full_revision_list"] = list(OrderedDict.fromkeys(
self._options.revisions))
port_revision_list = []
for revision in self["full_revision_list"]:
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="Port %s" % revision,
branch=self.vc.RemoteMasterBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
# Is this revision included in the original revision list?
if git_hash in self["full_revision_list"]:
print("Found port of %s -> %s (already included): %s"
% (revision, git_hash, revision_title))
else:
print("Found port of %s -> %s: %s"
% (revision, git_hash, revision_title))
port_revision_list.append(git_hash)
# Do we find any port?
if len(port_revision_list) > 0:
if self.Confirm("Automatically add corresponding ports (%s)?"
% ", ".join(port_revision_list)):
#: 'y': Add ports to revision list.
self["full_revision_list"].extend(port_revision_list)
class CreateCommitMessage(Step):
MESSAGE = "Create commit message."
def RunStep(self):
# Stringify: ["abcde", "12345"] -> "abcde, 12345"
self["revision_list"] = ", ".join(self["full_revision_list"])
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
action_text = "Merged %s"
# The commit message title is added below after the version is specified.
msg_pieces = [
"\n".join(action_text % s for s in self["full_revision_list"]),
]
msg_pieces.append("\n\n")
for commit_hash in self["full_revision_list"]:
patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
msg_pieces.append("%s\n\n" % patch_merge_desc)
bugs = []
for commit_hash in self["full_revision_list"]:
msg = self.GitLog(n=1, git_hash=commit_hash)
for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
bugs.extend(s.strip() for s in bug.split(","))
bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
if bug_aggregate:
msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)
self["new_commit_msg"] = "".join(msg_pieces)
class ApplyPatches(Step):
MESSAGE = "Apply patches for selected revisions."
def RunStep(self):
for commit_hash in self["full_revision_list"]:
print("Applying patch for %s to %s..."
% (commit_hash, self["merge_to_branch"]))
patch = self.GitGetPatch(commit_hash)
TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
if self._options.patch:
self.ApplyPatch(self._options.patch)
class PrepareVersion(Step):
MESSAGE = "Prepare version file."
def RunStep(self):
# This is used to calculate the patch level increment.
self.ReadAndPersistVersion()
class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
new_patch = str(int(self["patch"]) + 1)
if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
"EDITOR.)" % VERSION_FILE):
text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
r"\g<space>%s" % new_patch,
text)
TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
else:
self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
self.ReadAndPersistVersion("new_")
self["version"] = "%s.%s.%s.%s" % (self["new_major"],
self["new_minor"],
self["new_build"],
self["new_patch"])
class CommitLocal(Step):
MESSAGE = "Commit to local branch."
def RunStep(self):
# Add a commit message title.
self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
self["new_commit_msg"])
TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
class CommitRepository(Step):
MESSAGE = "Commit to the repository."
def RunStep(self):
self.GitCheckout(self.Config("BRANCHNAME"))
self.WaitForLGTM()
self.GitPresubmit()
self.vc.CLLand()
class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
print "Creating tag %s" % self["version"]
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
self["commit_title"])
class CleanUp(Step):
MESSAGE = "Cleanup."
def RunStep(self):
self.CommonCleanup()
print "*** SUMMARY ***"
print "version: %s" % self["version"]
print "branch: %s" % self["merge_to_branch"]
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
"master to other branches, including candidates.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--branch", help="The branch to merge to.")
parser.add_argument("revisions", nargs="*",
help="The revisions to merge.")
parser.add_argument("-f", "--force",
help="Delete sentinel file.",
default=False, action="store_true")
parser.add_argument("-m", "--message",
help="A commit message for the patch.")
parser.add_argument("-p", "--patch",
help="A patch file to apply as part of the merge.")
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
return False
if not options.message:
print "You must specify a merge comment if no patches are specified"
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
options.cc = "[email protected]"
# Make sure to use git hashes in the new workflows.
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
print "Please provide full git hashes of the patches to merge."
print "Got: %s" % revision
return False
return True
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
"PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
"/tmp/v8-merge-to-branch-tempfile-already-merging",
"TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
"COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
return [
Preparation,
CreateBranch,
SearchArchitecturePorts,
CreateCommitMessage,
ApplyPatches,
PrepareVersion,
IncrementVersion,
CommitLocal,
UploadStep,
CommitRepository,
TagRevision,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(MergeToBranch().Run())
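# Editor's note (added, hedged): a typical invocation of this script looks roughly like
#   merge_to_branch.py --branch <branch-name> <full-git-hash> [<full-git-hash> ...]
# optionally with -p/--patch for a local patch file and -m/--message when no revisions
# are given; the exact flags are defined in _PrepareOptions()/_ProcessOptions() above.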
| apache-2.0 | 7,732,654,756,987,864,000 | 35.375862 | 79 | 0.635226 | false |
molobrakos/home-assistant | homeassistant/components/websocket_api/http.py | 5 | 6900 | """View to accept incoming websocket connection."""
import asyncio
from contextlib import suppress
from functools import partial
import json
import logging
from aiohttp import web, WSMsgType
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.helpers.json import JSONEncoder
from .const import (
MAX_PENDING_MSG, CANCELLATION_ERRORS, URL, ERR_UNKNOWN_ERROR,
SIGNAL_WEBSOCKET_CONNECTED, SIGNAL_WEBSOCKET_DISCONNECTED,
DATA_CONNECTIONS)
from .auth import AuthPhase, auth_required_message
from .error import Disconnect
from .messages import error_message
JSON_DUMP = partial(json.dumps, cls=JSONEncoder, allow_nan=False)
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name = "websocketapi"
url = URL
requires_auth = False
async def get(self, request):
"""Handle an incoming websocket connection."""
return await WebSocketHandler(
request.app['hass'], request).async_handle()
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass, request):
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock = None
self._to_write = asyncio.Queue(maxsize=MAX_PENDING_MSG, loop=hass.loop)
self._handle_task = None
self._writer_task = None
self._logger = logging.getLogger(
"{}.connection.{}".format(__name__, id(self)))
async def _writer(self):
"""Write outgoing messages."""
# Exceptions if Socket disconnected or cancelled by connection handler
with suppress(RuntimeError, ConnectionResetError,
*CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
try:
await self.wsock.send_json(message, dumps=JSON_DUMP)
except (ValueError, TypeError) as err:
self._logger.error('Unable to serialize to JSON: %s\n%s',
err, message)
await self.wsock.send_json(error_message(
message['id'], ERR_UNKNOWN_ERROR,
'Invalid JSON in response'))
@callback
def _send_message(self, message):
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error("Client exceeded max pending messages [2]: %s",
MAX_PENDING_MSG)
self._cancel()
@callback
def _cancel(self):
"""Cancel the connection."""
self._handle_task.cancel()
self._writer_task.cancel()
async def async_handle(self):
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected")
# Py3.7+
if hasattr(asyncio, 'current_task'):
# pylint: disable=no-member
self._handle_task = asyncio.current_task()
else:
self._handle_task = asyncio.Task.current_task(loop=self.hass.loop)
@callback
def handle_hass_stop(event):
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop)
self._writer_task = self.hass.async_create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError:
disconnect_warn = \
'Did not receive auth message within 10 seconds'
raise Disconnect
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = 'Received non-Text message.'
raise Disconnect
try:
msg = msg.json()
except ValueError:
disconnect_warn = 'Received invalid JSON.'
raise Disconnect
self._logger.debug("Received %s", msg)
connection = await auth.async_handle(msg)
self.hass.data[DATA_CONNECTIONS] = \
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
elif msg.type != WSMsgType.TEXT:
disconnect_warn = 'Received non-Text message.'
break
try:
msg = msg.json()
except ValueError:
disconnect_warn = 'Received invalid JSON.'
break
self._logger.debug("Received %s", msg)
connection.async_handle(msg)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
except asyncio.QueueFull:
self._writer_task.cancel()
await wsock.close()
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED)
return wsock
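# Editor's note (added comment): async_handle() above runs in two phases -- an auth
# phase that must receive a valid auth message within 10 seconds, then a command phase
# that forwards each JSON text frame to the authenticated connection until the socket
# closes; the _writer() task drains the bounded outgoing queue the whole time.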
| apache-2.0 | -9,204,397,512,361,476,000 | 32.333333 | 79 | 0.572754 | false |
Renzo-Olivares/android_kk_kernel_htc_monarudo | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
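# Editor's note (added comment): the three *_include() builders above differ only in the
# protocol-specific nacl/tpg/port structs they emit, and they also set the module-level
# fabric_mod_port / fabric_mod_init_port globals ("lport"/"nport" for FC, "tport"/"iport"
# for SAS and iSCSI) that the configfs and fabric generators below rely on.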
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explicit\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
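# Example invocation (illustrative, not part of the original script): run from the
# script's own directory two levels below a kernel tree root (cwd + "/../../" is
# treated as the tree root above), e.g.
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
# This generates drivers/target/tcm_nab5000/ with fabric and configfs source
# skeletons plus a Makefile and Kconfig, and can optionally register the new
# module in drivers/target/Makefile and drivers/target/Kconfig.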
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 | 2,773,450,650,529,025,500 | 37.930451 | 162 | 0.572498 | false |
CopeX/odoo | addons/account/report/account_analytic_entries_report.py | 306 | 3879 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class analytic_entries_report(osv.osv):
_name = "analytic.entries.report"
_description = "Analytic Entries Statistics"
_auto = False
_columns = {
'date': fields.date('Date', readonly=True),
'user_id': fields.many2one('res.users', 'User',readonly=True),
'name': fields.char('Description', size=64, readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one('res.currency', 'Currency', required=True),
'account_id': fields.many2one('account.analytic.account', 'Account', required=False),
'general_account_id': fields.many2one('account.account', 'General Account', required=True),
'journal_id': fields.many2one('account.analytic.journal', 'Journal', required=True),
'move_id': fields.many2one('account.move.line', 'Move', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'amount': fields.float('Amount', readonly=True),
'unit_amount': fields.integer('Unit Amount', readonly=True),
'nbr': fields.integer('# Entries', readonly=True), # TDE FIXME master: rename into nbr_entries
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'analytic_entries_report')
cr.execute("""
create or replace view analytic_entries_report as (
select
min(a.id) as id,
count(distinct a.id) as nbr,
a.date as date,
a.user_id as user_id,
a.name as name,
analytic.partner_id as partner_id,
a.company_id as company_id,
a.currency_id as currency_id,
a.account_id as account_id,
a.general_account_id as general_account_id,
a.journal_id as journal_id,
a.move_id as move_id,
a.product_id as product_id,
a.product_uom_id as product_uom_id,
sum(a.amount) as amount,
sum(a.unit_amount) as unit_amount
from
account_analytic_line a, account_analytic_account analytic
where analytic.id = a.account_id
group by
a.date, a.user_id,a.name,analytic.partner_id,a.company_id,a.currency_id,
a.account_id,a.general_account_id,a.journal_id,
a.move_id,a.product_id,a.product_uom_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,598,718,204,987,262,500 | 49.376623 | 103 | 0.575406 | false |
angad/libjingle-mac | scons-2.2.0/engine/SCons/Tool/ipkg.py | 14 | 2532 | """SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls ipkg-build. Its only argument should be the
package's fake_root.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import SCons.Builder
def generate(env):
"""Add Builders and construction variables for ipkg to an Environment."""
try:
bld = env['BUILDERS']['Ipkg']
except KeyError:
bld = SCons.Builder.Builder( action = '$IPKGCOM',
suffix = '$IPKGSUFFIX',
source_scanner = None,
target_scanner = None)
env['BUILDERS']['Ipkg'] = bld
env['IPKG'] = 'ipkg-build'
env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
env['IPKGUSER'] = os.popen('id -un').read().strip()
env['IPKGGROUP'] = os.popen('id -gn').read().strip()
env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
env['IPKGSUFFIX'] = '.ipk'
def exists(env):
return env.Detect('ipkg-build')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause | -336,516,745,691,473,700 | 36.791045 | 107 | 0.685229 | false |
SebDieBln/QGIS | python/ext-libs/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
'''
pass
class InvalidTimeError(Exception):
'''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
'''Exception raised when attempting to create an ambiguous wallclock time.
At the end of a DST transition period, a particular wallclock time will
occur twice (once before the clocks are set back, once after). Both
possibilities may be correct, unless further information is supplied.
See DstTzInfo.normalize() for more info
'''
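# Illustrative example (not part of the original module): localizing an ambiguous
# wallclock time with is_dst=None raises AmbiguousTimeError, e.g.
#   pytz.timezone('US/Eastern').localize(datetime(2002, 10, 27, 1, 30), is_dst=None)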
class NonExistentTimeError(InvalidTimeError):
'''Exception raised when attempting to create a wallclock time that
cannot exist.
At the start of a DST transition period, the wallclock time jumps forward.
The instants jumped over never occur.
'''
| gpl-2.0 | -8,459,153,895,597,767,000 | 26.770833 | 78 | 0.726932 | false |
rajathkumarmp/BinPy | BinPy/algorithms/ExpressionConvert.py | 4 | 9668 | def makeCompatible(expr):
'''Used by convertExpression to convert logical operators to english words.'''
expr = expr.replace('~&', ' NAND ')
expr = expr.replace('~|', ' NOR ')
expr = expr.replace('~^', ' XNOR ')
expr = expr.replace('&', ' AND ')
expr = expr.replace('|', ' OR ')
expr = expr.replace('~', ' NOT ')
expr = expr.replace('^', ' XOR ')
return '((' + expr + '))'
def createList(expr):
'''Creates a list which can be used by convertExpression for conversion.'''
list1 = expr.split('(')
list2 = []
list3 = []
while ('' in list1):
list1.remove('')
for string in list1:
l = string.split()
list2.extend(l)
for string in list2:
sublist = []
if ')' in string:
while ')' in string:
index = string.find(')')
sublist.append(string[:index])
sublist.append(')')
string = string[index + 1:]
sublist.append(string)
list3.extend(sublist)
else:
list3.extend([string])
while ('' in list3):
list3.remove('')
return (list3)
def mergeNot(case, expr):
    '''Combines a NOT gate with other gates to minimize the number of gates used.'''
if expr[-1] == ')':
index = expr.find('(')
gate = expr[:index].upper()
if gate == 'OR' and case == 'general':
return 'NOR' + expr[index:]
elif gate == 'AND' and case == 'general':
return 'NAND' + expr[index:]
elif gate == 'NOT':
return expr[index + 1:-1]
elif gate == 'XOR'and case == 'general':
return 'XNOR' + expr[index:]
elif gate == 'XNOR'and case == 'general':
return 'XOR' + expr[index:]
elif gate == 'NAND'and case == 'general':
return 'AND' + expr[index:]
elif gate == 'NOR'and case == 'general':
return 'OR' + expr[index:]
return 'NOT(' + expr + ')'
def to_and_or_not(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only OR, NOT, or AND gates'''
if gate == 'AND' or gate == 'OR':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NAND':
        return 'NOT(AND(' + op1 + ', ' + op2 + '))'
    elif gate == 'NOR':
        return 'NOT(OR(' + op1 + ', ' + op2 + '))'
elif gate == 'XOR':
return ('OR(AND(' + op1 + ', ' + mergeNot('general', op2)
+ '), AND(' + mergeNot('general', op1) + ', ' + op2 + '))')
elif gate == 'XNOR':
return (
'OR(AND(' +
mergeNot(
'general',
op1) +
', ' +
mergeNot(
'general',
op2) +
'), AND(' +
op1 +
', ' +
op2 +
'))')
def to_nand(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only NAND gates'''
if gate == 'AND':
return 'NOT(NAND(' + op1 + ', ' + op2 + '))'
elif gate == 'OR':
return ('NAND(' + mergeNot('special', op1) + ', '
+ mergeNot('special', op2) + ')')
elif gate == 'NAND':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NOR':
return 'NOT(' + to_nand('OR', op1, op2) + ')'
elif gate == 'XOR':
return (
'NAND(NAND(' +
op1 +
', NAND(' +
op1 +
', ' +
op2 +
')), NAND(' +
op2 +
', NAND(' +
op1 +
', ' +
op2 +
')))')
elif gate == 'XNOR':
return 'NOT(' + to_nand('XOR', op1, op2) + ')'
def to_nor(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only NOR gates'''
if gate == 'OR':
return 'NOT(NOR(' + op1 + ', ' + op2 + '))'
elif gate == 'AND':
return ('NOR(' + mergeNot('special', op1) + ', '
+ mergeNot('special', op2) + ')')
elif gate == 'NOR':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NAND':
return 'NOT(' + to_nor('AND', op1, op2) + ')'
elif gate == 'XNOR':
return ('NOR(NOR(' + op1 + ', NOR(' + op1 + ', '
+ op2 + ')), NOR(' + op2 + ', NOR(' + op1 + ', ' + op2 + ')))')
elif gate == 'XOR':
return 'NOT(' + to_nor('XNOR', op1, op2) + ')'
def remove_not(gate, exp):
'''Converts a NOT gate and its operand to use the specified gate only.
The input gate must be NAND or NOR only.'''
while 'NOT' in exp:
index = exp.find('NOT(')
index2 = index
index3 = exp.find('(', index)
while True:
index2 = exp.find(')', index2 + 1)
index3 = exp.find('(', index3 + 1)
if index3 == -1 or index3 > index2:
break
exp = exp[:index] + gate + '(' + exp[index + 4:index2] + \
', ' + exp[index + 4:index2] + ')' + exp[index2 + 1:]
return exp
def convertExpression(expr, two_input=0, only_nand=0,
only_nor=0, only_and_or_not=0):
''' Converts logical expression to an implementable form.
Make two_input 1 if only two input gates must be used.
Make only_nand 1 if only 2 input nand gates must be used.
Make only_nor 1 if only 2 input nor gates must be used.
Make only_and_or_not 1 if only 2 input AND, OR and NOTs be used.
Error occurs if more than one variable is put to 1.
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)')
OR(AND(NOT(a), NOT(b)), AND(C, NOT(d), E, F))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', two_input=1)
OR(AND(NOT(a), NOT(b)), AND(C, AND(NOT(d), E)))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_nand=1)
NAND(NAND(NAND(a, a), NAND(b, b)), NAND(C, NAND(NAND(NAND(d, d), E), NAND(NAND(d, d), E))))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_nor=1)
NOR(NOR(NOR(a, b), NOR(NOR(C, C), NOR(NOR(d, NOR(E, E)),...
NOR(d, NOR(E, E))))), NOR(NOR(a, b), NOR(NOR(C, C), NOR(NOR(d, NOR(E, E)), NOR(d, NOR(E, E))))))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_and_or_not=1)
OR(AND(NOT(a), NOT(b)), AND(C, AND(NOT(d), AND(E, F))))
'''
expr = makeCompatible(expr)
list1 = createList(expr)
while ')' in list1:
index = list1.index(')')
if index != len(list1) - 1 and list1[index + 1] == ')':
last = 0
else:
last = 1
if len(list1) > 1:
op2 = list1.pop(index - 1)
gate = list1.pop(index - 2)
gate = gate.upper()
if gate != 'NOT':
try:
op1 = list1.pop(index - 3)
except:
list1.insert(index - 1, gate)
list1.insert(index - 2, op2)
break
previous_gate = op1[:len(gate)]
previous_gate = previous_gate.upper()
next_gate = op2[:len(gate)]
next_gate = next_gate.upper()
if (two_input == 0 and gate != 'NAND'and gate != 'NOR')and (
only_nand == 0 and only_nor == 0 and only_and_or_not == 0):
if (gate == previous_gate) and (gate == next_gate.upper()):
new_element = gate + \
'(' + op1[len(gate) + 1:-1] + \
', ' + op2[len(gate) + 1:-1] + ')'
elif (gate == previous_gate) and (gate != next_gate.upper()):
new_element = gate + \
'(' + op1[len(gate) + 1:-1] + ', ' + op2 + ')'
elif (gate != previous_gate) and (gate == next_gate.upper()):
new_element = gate + \
'(' + op1 + ', ' + op2[len(gate) + 1:-1] + ')'
else:
new_element = gate + '(' + op1 + ', ' + op2 + ')'
else:
if only_nand == 0 and only_nor == 0 and only_and_or_not == 0:
new_element = gate + '(' + op1 + ', ' + op2 + ')'
elif only_nand == 1 and only_nor == 0 and only_and_or_not == 0:
new_element = to_nand(gate, op1, op2)
elif only_nand == 0 and only_nor == 1 and only_and_or_not == 0:
new_element = to_nor(gate, op1, op2)
elif only_nand == 0 and only_nor == 0 and only_and_or_not == 1:
new_element = to_and_or_not(gate, op1, op2)
else:
raise Exception("Invalid Input")
list1.insert(index - 3, new_element)
if (last != 1) or list1.index(')') == 1:
temp1 = list1.index(')')
temp2 = list1.pop(temp1)
else:
if only_nand == 0 and only_nor == 0 and only_and_or_not == 0:
new_element = mergeNot('general', op2)
else:
new_element = mergeNot('special', op2)
list1.insert(index - 2, new_element)
temp1 = list1.index(')')
temp2 = list1.pop(temp1)
if list1.count(')') == len(list1) - 1:
break
if only_nand == 1:
return (remove_not('NAND', list1[0]))
elif only_nor == 1:
return (remove_not('NOR', list1[0]))
else:
return (list1[0])
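# Minimal usage sketch (illustrative addition, not part of the original module):
if __name__ == '__main__':
    expr = '( NOT(a) and NOT(b)) or (C and Not(d) and E and F)'
    # Expected (per the docstring above): OR(AND(NOT(a), NOT(b)), AND(C, NOT(d), E, F))
    print(convertExpression(expr))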
| bsd-3-clause | 4,081,134,234,751,134,700 | 37.672 | 101 | 0.450248 | false |
AnishShah/tensorflow | tensorflow/python/data/util/nest.py | 17 | 17827 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for this change is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
that would normally be flattened and we want to be able to create sparse
tensor from `SparseTensorValue's similarly to creating tensors from numpy
arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(_six.iterkeys(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
    instance: an instance of `tuple`, `list`, `dict`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)((key, result[key]) for key in _six.iterkeys(instance))
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield iterable[key]
elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
yield iterable
else:
for value in iterable:
yield value
# See the swig file (../../util/util.i) for documentation.
is_sequence = _pywrap_tensorflow.IsSequenceForData
# See the swig file (../../util/util.i) for documentation.
flatten = _pywrap_tensorflow.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences should be same as
well. For dictionary, "type" of dictionary is considered to include its
keys. In other words, two dictionaries with different keys are considered
to have a different "type". If set to `False`, two iterables are
considered same as long as they yield the elements that have same
structures.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
_pywrap_tensorflow.AssertSameStructureForData(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
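# Illustrative example (not part of the original module):
#   pack_sequence_as(((1, 2), 3), ['a', 'b', 'c'])  ==  (('a', 'b'), 'c')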
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments are there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
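# Illustrative example (not part of the original module):
#   map_structure(lambda x, y: x + y, (1, (2, 3)), (10, (20, 30)))
# returns (11, (22, 33)).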
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
if check_types and isinstance(shallow_tree, dict):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
"structure has keys %s, while shallow structure has keys %s." %
(list(_six.iterkeys(input_tree)),
list(_six.iterkeys(shallow_tree))))
input_tree = list(sorted(_six.iteritems(input_tree)))
shallow_tree = list(sorted(_six.iteritems(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function, therefore, will return something with the same base structure
as `shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
| apache-2.0 | 5,682,351,320,448,373,000 | 36.530526 | 80 | 0.679082 | false |
zielmicha/pam_ssh | pam_ssh.py | 1 | 1482 | import os
import sys
import pwd
import socket
import json
auth_token = None
def rpc(name, args):
sock = socket.socket(socket.AF_UNIX)
sock.connect('/var/run/pam_ssh.sock')
f = sock.makefile('r+')
f.write(json.dumps([name, args]) + '\n')
f.flush()
resp = int(f.readline())
return resp
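# Wire format sketch (derived from the code above; example values are illustrative):
# each request is a single JSON line such as
#   ["auth", {"user": "alice", "auth_token": "<password>"}]
# sent to /var/run/pam_ssh.sock, and the daemon replies with a single integer
# line, where 0 means success.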
def pam_sm_authenticate(pamh, flags, argv):
global auth_token
username = pamh.get_user()
pw = pwd.getpwnam(username)
if pw.pw_uid < 1000:
return pamh.PAM_AUTH_ERR
auth_token = pamh.authtok
    if not auth_token:
        return pamh.PAM_AUTH_ERR
    if len(auth_token) > 1024:
        return pamh.PAM_AUTH_ERR
code = rpc('auth', dict(user=username, auth_token=auth_token))
if code == 0:
return pamh.PAM_SUCCESS
else:
return pamh.PAM_AUTH_ERR
def pam_sm_setcred(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_acct_mgmt(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_open_session(pamh, flags, argv):
user = pamh.get_user()
pw = pwd.getpwnam(user)
token = auth_token
if pw.pw_uid < 1000:
return pamh.PAM_SUCCESS
code = rpc('open_session', dict(user=user,
auth_token=auth_token))
if code == 0:
return pamh.PAM_SUCCESS
else:
return pamh.PAM_AUTH_ERR
def pam_sm_close_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_chauthtok(pamh, flags, argv):
return pamh.PAM_SUCCESS
| mit | -6,009,037,723,151,387,000 | 21.454545 | 66 | 0.620108 | false |
nkrishnaswami/census | uscensus/data/whooshindex.py | 1 | 3248 | from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from whoosh.analysis.filters import StopFilter
from whoosh.analysis import (KeywordAnalyzer, StandardAnalyzer)
from whoosh.filedb.filestore import FileStorage, RamStorage
from whoosh.fields import Schema, KEYWORD, ID, TEXT
from whoosh.qparser import QueryParser
from whoosh.writing import AsyncWriter
KWAnalyzer = KeywordAnalyzer(lowercase=True) | StopFilter()
Analyzer = StandardAnalyzer()
ApiSchemaFields = OrderedDict((
('api_id', ID(unique=True, stored=True)),
('title', KEYWORD(analyzer=KWAnalyzer)),
('description', TEXT(analyzer=Analyzer)),
('geographies', KEYWORD(analyzer=KWAnalyzer)),
('concepts', KEYWORD(analyzer=KWAnalyzer)),
('keywords', KEYWORD(analyzer=KWAnalyzer)),
('tags', KEYWORD(analyzer=KWAnalyzer)),
('variables', KEYWORD(analyzer=KWAnalyzer)),
('vintage', ID),
))
VariableSchemaFields = OrderedDict((
('api_id', ID(stored=True)),
('variable', ID(stored=True)),
('group', ID(stored=True)),
('label', TEXT(analyzer=Analyzer)),
('concept', KEYWORD(analyzer=Analyzer)),
))
class Index(object):
"""Census API metadata indexer."""
def __init__(self, name, schema_fields, dflt_query_field, path=None):
"""Initialize Whoosh index specified fields.
Arguments:
* schema_fields: an OrderedDict of column names to whoosh
field types.
* path: if specified, the path in which to create a
persistent index. If not specified, index to RAM.
"""
self.schema_fields = schema_fields
# Initialize index
fs = FileStorage(path).create() if path else RamStorage()
        if fs.index_exists(name):
self.index = fs.open_index(name)
            schema = self.index.schema
else:
schema = Schema(**self.schema_fields)
self.index = fs.create_index(schema, name)
self.qparser = QueryParser(dflt_query_field,
schema=schema)
self.writer = None
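    # Usage sketch (illustrative, not part of the original module):
    #   idx = Index('apis', ApiSchemaFields, 'description', path='/tmp/idx')
    #   with idx:
    #       idx.add(rows)        # rows yields tuples matching ApiSchemaFields
    #   hits = idx.query('population')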
def __enter__(self):
self.writer = AsyncWriter(
self.index, writerargs=dict(limitmb=1000))
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.writer.cancel()
else:
self.writer.commit()
def add(self, iterator, **kwargs):
"""Add entries to the index
Arguments:
* iterator: iterator over tuples of field metadata, viz.
api_id, title, description, variables, geographies, concepts,
keywords, tags, and vintage.
"""
for vals in iterator:
self.writer.add_document(
**dict(zip(self.schema_fields, vals)))
def query(self, querystring):
"""Find API IDs matching querystring"""
query = self.qparser.parse(querystring)
with self.index.searcher() as searcher:
results = searcher.search(query, limit=None)
ret = []
for hit in results:
val = dict(hit.items())
val['score'] = hit.score
ret.append(val)
return ret
| apache-2.0 | 4,803,558,129,442,744,000 | 33.924731 | 73 | 0.615148 | false |
arpitprogressive/arpittest | apps/admin/migrations/0012_auto__add_field_subsector_career_guide.py | 1 | 13328 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubSector.career_guide'
db.add_column(u'admin_subsector', 'career_guide',
self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SubSector.career_guide'
db.delete_column(u'admin_subsector', 'career_guide')
models = {
'admin.company': {
'Meta': {'object_name': 'Company'},
'company_type': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.institution': {
'Meta': {'object_name': 'Institution'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.job': {
'Meta': {'object_name': 'Job'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_internship': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'job_role': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.QualificationPack']"}),
'job_title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
u'admin.logentry': {
'Meta': {'ordering': "(u'-action_time',)", 'object_name': 'LogEntry', 'db_table': "u'django_admin_log'"},
'action_flag': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'action_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_repr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'admin.occupation': {
'Meta': {'object_name': 'Occupation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'})
},
'admin.occupationalstandard': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'OccupationalStandard'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'knowledge': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'performace_criteria': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'scope': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'skills': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
},
'admin.qualificationpack': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'QualificationPack'},
'alias': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '9', 'blank': 'True'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'experience': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'job_role': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'max_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'min_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'next_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.QualificationPack']", 'null': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'nveqf_level': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'occupation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Occupation']"}),
'os_compulsory': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'os_compulsory'", 'blank': 'True', 'to': "orm['admin.OccupationalStandard']"}),
'os_optional': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'os_optional'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['admin.OccupationalStandard']"}),
'role_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'}),
'training': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'})
},
'admin.sector': {
'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'})
},
'admin.subsector': {
'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
'career_guide': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobility_map': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"})
},
'admin.track': {
'Meta': {'object_name': 'Track'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['admin'] | bsd-3-clause | 589,842,523,671,612,700 | 79.295181 | 218 | 0.548995 | false |
okolisny/integration_tests | cfme/tests/cloud_infra_common/test_snapshots_rest.py | 1 | 4663 | # -*- coding: utf-8 -*-
import pytest
import fauxfactory
from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.vm import VM
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import error, testgen
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.rest import assert_response
from cfme.utils.version import current_version
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.uncollectif(
lambda: current_version() < '5.8'),
pytest.mark.long_running,
pytest.mark.tier(2),
test_requirements.snapshot
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc,
[VMwareProvider, OpenStackProvider])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope='module')
@pytest.yield_fixture(scope='module')
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
"""Creates new VM or instance"""
vm_name = random_vm_name('snpsht')
new_vm = VM.factory(vm_name, provider, template_name=small_template_modscope.name)
if not provider.mgmt.does_vm_exist(vm_name):
new_vm.create_on_provider(find_in_cfme=True, allow_skip='default')
yield new_vm
try:
provider.mgmt.delete_vm(new_vm.name)
except Exception:
logger.warning("Failed to delete vm `{}`.".format(new_vm.name))
@pytest.fixture(scope='module')
def collection(appliance, provider):
"""Returns "vms" or "instances" collection based on provider type"""
if provider.one_of(InfraProvider):
return appliance.rest_api.collections.vms
return appliance.rest_api.collections.instances
@pytest.yield_fixture(scope='function')
def vm_snapshot(appliance, collection, vm_obj):
"""Creates VM/instance snapshot using REST API
Returns:
Tuple with VM and snapshot resources in REST API
"""
uid = fauxfactory.gen_alphanumeric(8)
snap_name = 'snpshot_{}'.format(uid)
vm = collection.get(name=vm_obj.name)
vm.snapshots.action.create(
name=snap_name,
description='snapshot {}'.format(uid),
memory=False)
assert_response(appliance)
snap, __ = wait_for(
lambda: vm.snapshots.find_by(name=snap_name) or False,
num_sec=600, delay=5)
snap = snap[0]
yield vm, snap
collection.reload()
to_delete = vm.snapshots.find_by(name=snap_name)
if to_delete:
vm.snapshots.action.delete(to_delete[0])
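# Illustrative sketch (not part of the original tests): a test consuming the
# fixture above unpacks the (vm, snapshot) tuple it yields, e.g.:
#     def test_example(vm_snapshot):
#         vm, snap = vm_snapshot
#         assert vm.snapshots.find_by(name=snap.name)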
class TestRESTSnapshots(object):
"""Tests actions with VM/instance snapshots using REST API"""
def test_create_snapshot(self, vm_snapshot):
"""Creates VM/instance snapshot using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
vm.snapshots.get(name=snapshot.name)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_snapshot_from_detail(self, appliance, vm_snapshot, method):
"""Deletes VM/instance snapshot from detail using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
if method == 'post':
del_action = snapshot.action.delete.POST
else:
del_action = snapshot.action.delete.DELETE
del_action()
assert_response(appliance)
snapshot.wait_not_exists(num_sec=300, delay=5)
with error.expected('ActiveRecord::RecordNotFound'):
del_action()
assert_response(appliance, http_status=404)
def test_delete_snapshot_from_collection(self, appliance, vm_snapshot):
"""Deletes VM/instance snapshot from collection using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
vm.snapshots.action.delete.POST(snapshot)
assert_response(appliance)
snapshot.wait_not_exists(num_sec=300, delay=5)
with error.expected('ActiveRecord::RecordNotFound'):
vm.snapshots.action.delete.POST(snapshot)
assert_response(appliance, http_status=404)
@pytest.mark.uncollectif(lambda provider:
not provider.one_of(InfraProvider) or current_version() < '5.8')
def test_revert_snapshot(self, appliance, vm_snapshot):
"""Reverts VM/instance snapshot using REST API
Metadata:
test_flag: rest
"""
__, snapshot = vm_snapshot
snapshot.action.revert()
assert_response(appliance)
| gpl-2.0 | -3,708,382,020,540,596,700 | 30.506757 | 86 | 0.668883 | false |
geerlingguy/ansible-modules-extras | monitoring/bigpanda.py | 20 | 5211 | #!/usr/bin/python
DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
version_added: "1.8"
description:
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
options:
component:
description:
- "The name of the component being deployed. Ex: billing"
required: true
alias: name
version:
description:
- The deployment version.
required: true
token:
description:
- API token.
required: true
state:
description:
- State of the deployment.
required: true
choices: ['started', 'finished', 'failed']
hosts:
description:
- Name of affected host name. Can be a list.
required: false
default: machine's hostname
alias: host
env:
description:
- The environment name, typically 'production', 'staging', etc.
required: false
owner:
description:
- The person responsible for the deployment.
required: false
description:
description:
- Free text description of the deployment.
required: false
url:
description:
- Base URL of the API server.
required: False
default: https://api.bigpanda.io
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''
EXAMPLES = '''
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
...
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished
or using a deployment object:
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
register: deployment
- bigpanda: state=finished
args: deployment
If outside servers aren't reachable from your machine, use local_action and pass the hostname:
- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started
register: deployment
...
- local_action: bigpanda state=finished
args: deployment
'''
# ===========================================
# Module execution.
#
import socket
def main():
module = AnsibleModule(
argument_spec=dict(
component=dict(required=True, aliases=['name']),
version=dict(required=True),
token=dict(required=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
env=dict(required=False),
owner=dict(required=False),
description=dict(required=False),
message=dict(required=False),
source_system=dict(required=False, default='ansible'),
validate_certs=dict(default='yes', type='bool'),
url=dict(required=False, default='https://api.bigpanda.io'),
),
supports_check_mode=True,
check_invalid_arguments=False,
)
token = module.params['token']
state = module.params['state']
url = module.params['url']
# Build the common request body
body = dict()
for k in ('component', 'version', 'hosts'):
v = module.params[k]
if v is not None:
body[k] = v
if not isinstance(body['hosts'], list):
body['hosts'] = [body['hosts']]
# Insert state-specific attributes to body
if state == 'started':
for k in ('source_system', 'env', 'owner', 'description'):
v = module.params[k]
if v is not None:
body[k] = v
request_url = url + '/data/events/deployments/start'
else:
message = module.params['message']
if message is not None:
body['errorMessage'] = message
if state == 'finished':
body['status'] = 'success'
else:
body['status'] = 'failure'
request_url = url + '/data/events/deployments/end'
# Build the deployment object we return
deployment = dict(token=token, url=url)
deployment.update(body)
if 'errorMessage' in deployment:
message = deployment.pop('errorMessage')
deployment['message'] = message
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True, **deployment)
# Send the data to bigpanda
data = json.dumps(body)
headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
try:
response, info = fetch_url(module, request_url, data=data, headers=headers)
if info['status'] == 200:
module.exit_json(changed=True, **deployment)
else:
module.fail_json(msg=json.dumps(info))
except Exception, e:
module.fail_json(msg=str(e))
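# Example of the deployment facts returned on success (illustrative values only;
# the token, hostname and version below are assumptions):
#     {"token": "abc123", "url": "https://api.bigpanda.io",
#      "component": "myapp", "version": "1.3", "hosts": ["web-1"]}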
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | -4,162,750,816,331,604,500 | 29.296512 | 157 | 0.634043 | false |
xuvw/viewfinder | backend/prod/babysitter.py | 13 | 1670 | #!/usr/bin/env python
#
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Babysitter server starts instances of servers defined in a deployment
template.
Each server instance is started, monitored, and restarted as
necessary. Log files for each server are archived to S3 as
appropriate, custom CloudWatch metrics are reported, and AWS SNS is
used to notify of any unrecoverable failures.
Start(): Launch the babysitter application (called from main)
"""
__author__ = '[email protected] (Spencer Kimball)'
import sys
from tornado import ioloop, options, template
from viewfinder.backend.base import admin_server, basic_auth, handler
options.define("babysitter_port", default=1025,
help="Port for babysitter status")
class _MainHandler(basic_auth.BasicAuthHandler):
"""Displays the servers being babysat, with status information."""
_TEMPLATE = template.Template("""
<html>
<title>Babysitter Status</title>
<body>Admin: {{ user }}</body>
</html>
""")
@handler.authenticated()
def get(self):
self.write(_MainHandler._TEMPLATE.generate(
user=self.get_current_user()))
def Start(servers=None):
"""Starts the babysitter tornado web server with SSL.
:arg servers: server deployment specification.
"""
print "in babysitter"
options.parse_command_line()
babysitter = admin_server.AdminServer(
handlers=[(r"/", _MainHandler), ],
port=options.options.babysitter_port)
print "connect to babysitter via https://{0}:{1}/".format(
'localhost', options.options.babysitter_port)
ioloop.IOLoop.instance().start()
def main():
Start()
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 1,311,488,130,185,594,600 | 23.925373 | 72 | 0.714371 | false |
akarki15/mozillians | vendor-local/lib/python/tablib/packages/openpyxl3/writer/workbook.py | 116 | 10769 | # file openpyxl/writer/workbook.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write the workbook global settings to the archive."""
# package imports
from ..shared.xmltools import Element, SubElement
from ..cell import absolute_coordinate
from ..shared.xmltools import get_document_content
from ..shared.ooxml import NAMESPACES, ARC_CORE, ARC_WORKBOOK, \
ARC_APP, ARC_THEME, ARC_STYLE, ARC_SHARED_STRINGS
from ..shared.date_time import datetime_to_W3CDTF
def write_properties_core(properties):
"""Write the core properties to xml."""
root = Element('cp:coreProperties', {'xmlns:cp': NAMESPACES['cp'],
'xmlns:xsi': NAMESPACES['xsi'], 'xmlns:dc': NAMESPACES['dc'],
'xmlns:dcterms': NAMESPACES['dcterms'],
'xmlns:dcmitype': NAMESPACES['dcmitype'], })
SubElement(root, 'dc:creator').text = properties.creator
SubElement(root, 'cp:lastModifiedBy').text = properties.last_modified_by
SubElement(root, 'dcterms:created', \
{'xsi:type': 'dcterms:W3CDTF'}).text = \
datetime_to_W3CDTF(properties.created)
SubElement(root, 'dcterms:modified',
{'xsi:type': 'dcterms:W3CDTF'}).text = \
datetime_to_W3CDTF(properties.modified)
return get_document_content(root)
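# Illustrative sketch (not part of the original module): any object exposing
# creator, last_modified_by, created and modified attributes can be serialized
# by write_properties_core(); the class below is an assumption for demonstration.
#     import datetime
#     class _Props(object):
#         creator = 'user'
#         last_modified_by = 'user'
#         created = datetime.datetime(2012, 1, 1)
#         modified = datetime.datetime(2012, 1, 2)
#     xml = write_properties_core(_Props())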
def write_content_types(workbook):
"""Write the content-types xml."""
root = Element('Types', {'xmlns': 'http://schemas.openxmlformats.org/package/2006/content-types'})
SubElement(root, 'Override', {'PartName': '/' + ARC_THEME, 'ContentType': 'application/vnd.openxmlformats-officedocument.theme+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_STYLE, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml'})
SubElement(root, 'Default', {'Extension': 'rels', 'ContentType': 'application/vnd.openxmlformats-package.relationships+xml'})
SubElement(root, 'Default', {'Extension': 'xml', 'ContentType': 'application/xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_WORKBOOK, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_APP, 'ContentType': 'application/vnd.openxmlformats-officedocument.extended-properties+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_CORE, 'ContentType': 'application/vnd.openxmlformats-package.core-properties+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_SHARED_STRINGS, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml'})
drawing_id = 1
chart_id = 1
for sheet_id, sheet in enumerate(workbook.worksheets):
SubElement(root, 'Override',
{'PartName': '/xl/worksheets/sheet%d.xml' % (sheet_id + 1),
'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml'})
if sheet._charts:
SubElement(root, 'Override',
{'PartName' : '/xl/drawings/drawing%d.xml' % (sheet_id + 1),
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawing+xml'})
drawing_id += 1
for chart in sheet._charts:
SubElement(root, 'Override',
{'PartName' : '/xl/charts/chart%d.xml' % chart_id,
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawingml.chart+xml'})
chart_id += 1
if chart._shapes:
SubElement(root, 'Override',
{'PartName' : '/xl/drawings/drawing%d.xml' % drawing_id,
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml'})
drawing_id += 1
return get_document_content(root)
def write_properties_app(workbook):
"""Write the properties xml."""
worksheets_count = len(workbook.worksheets)
root = Element('Properties', {'xmlns': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
'xmlns:vt': 'http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes'})
SubElement(root, 'Application').text = 'Microsoft Excel'
SubElement(root, 'DocSecurity').text = '0'
SubElement(root, 'ScaleCrop').text = 'false'
SubElement(root, 'Company')
SubElement(root, 'LinksUpToDate').text = 'false'
SubElement(root, 'SharedDoc').text = 'false'
SubElement(root, 'HyperlinksChanged').text = 'false'
SubElement(root, 'AppVersion').text = '12.0000'
# heading pairs part
heading_pairs = SubElement(root, 'HeadingPairs')
vector = SubElement(heading_pairs, 'vt:vector',
{'size': '2', 'baseType': 'variant'})
variant = SubElement(vector, 'vt:variant')
SubElement(variant, 'vt:lpstr').text = 'Worksheets'
variant = SubElement(vector, 'vt:variant')
SubElement(variant, 'vt:i4').text = '%d' % worksheets_count
# title of parts
title_of_parts = SubElement(root, 'TitlesOfParts')
vector = SubElement(title_of_parts, 'vt:vector',
{'size': '%d' % worksheets_count, 'baseType': 'lpstr'})
for ws in workbook.worksheets:
SubElement(vector, 'vt:lpstr').text = '%s' % ws.title
return get_document_content(root)
def write_root_rels(workbook):
"""Write the relationships xml."""
root = Element('Relationships', {'xmlns':
'http://schemas.openxmlformats.org/package/2006/relationships'})
SubElement(root, 'Relationship', {'Id': 'rId1', 'Target': ARC_WORKBOOK,
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument'})
SubElement(root, 'Relationship', {'Id': 'rId2', 'Target': ARC_CORE,
'Type': 'http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties'})
SubElement(root, 'Relationship', {'Id': 'rId3', 'Target': ARC_APP,
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/extended-properties'})
return get_document_content(root)
def write_workbook(workbook):
"""Write the core workbook xml."""
root = Element('workbook', {'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xml:space': 'preserve', 'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
SubElement(root, 'fileVersion', {'appName': 'xl', 'lastEdited': '4',
'lowestEdited': '4', 'rupBuild': '4505'})
SubElement(root, 'workbookPr', {'defaultThemeVersion': '124226',
'codeName': 'ThisWorkbook'})
book_views = SubElement(root, 'bookViews')
SubElement(book_views, 'workbookView', {'activeTab': '%d' % workbook.get_index(workbook.get_active_sheet()),
'autoFilterDateGrouping': '1', 'firstSheet': '0', 'minimized': '0',
'showHorizontalScroll': '1', 'showSheetTabs': '1',
'showVerticalScroll': '1', 'tabRatio': '600',
'visibility': 'visible'})
# worksheets
sheets = SubElement(root, 'sheets')
for i, sheet in enumerate(workbook.worksheets):
sheet_node = SubElement(sheets, 'sheet', {'name': sheet.title,
'sheetId': '%d' % (i + 1), 'r:id': 'rId%d' % (i + 1)})
if not sheet.sheet_state == sheet.SHEETSTATE_VISIBLE:
sheet_node.set('state', sheet.sheet_state)
# named ranges
defined_names = SubElement(root, 'definedNames')
for named_range in workbook.get_named_ranges():
name = SubElement(defined_names, 'definedName',
{'name': named_range.name})
# as there can be many cells in one range, generate the list of ranges
dest_cells = []
cell_ids = []
for worksheet, range_name in named_range.destinations:
cell_ids.append(workbook.get_index(worksheet))
dest_cells.append("'%s'!%s" % (worksheet.title.replace("'", "''"),
absolute_coordinate(range_name)))
# for local ranges, we must check all the cells belong to the same sheet
base_id = cell_ids[0]
if named_range.local_only and all([x == base_id for x in cell_ids]):
name.set('localSheetId', '%s' % base_id)
# finally write the cells list
name.text = ','.join(dest_cells)
SubElement(root, 'calcPr', {'calcId': '124519', 'calcMode': 'auto',
'fullCalcOnLoad': '1'})
return get_document_content(root)
def write_workbook_rels(workbook):
"""Write the workbook relationships xml."""
root = Element('Relationships', {'xmlns':
'http://schemas.openxmlformats.org/package/2006/relationships'})
for i in range(len(workbook.worksheets)):
SubElement(root, 'Relationship', {'Id': 'rId%d' % (i + 1),
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet',
'Target': 'worksheets/sheet%s.xml' % (i + 1)})
rid = len(workbook.worksheets) + 1
SubElement(root, 'Relationship',
{'Id': 'rId%d' % rid, 'Target': 'sharedStrings.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings'})
SubElement(root, 'Relationship',
{'Id': 'rId%d' % (rid + 1), 'Target': 'styles.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles'})
SubElement(root, 'Relationship',
{'Id': 'rId%d' % (rid + 2), 'Target': 'theme/theme1.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme'})
return get_document_content(root)
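# Illustrative sketch (not part of the original module): how these helpers are
# typically combined when serializing a workbook. It assumes the workbook object
# exposes a .properties attribute, as the package's Excel writer does.
#     parts = {
#         ARC_CORE: write_properties_core(workbook.properties),
#         ARC_APP: write_properties_app(workbook),
#         ARC_WORKBOOK: write_workbook(workbook),
#     }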
| bsd-3-clause | -4,148,648,247,367,899,000 | 51.789216 | 168 | 0.653357 | false |
chrismeyersfsu/ansible-modules-core | network/nxos/nxos_vxlan_vtep.py | 27 | 17268 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep
version_added: "2.2"
short_description: Manages VXLAN Network Virtualization Endpoint (NVE).
description:
- Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface
that terminates VXLAN tunnels.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- The module is used to manage NVE properties, not to create NVE
interfaces. Use M(nxos_interface) if you wish to do so.
- C(state=absent) removes the interface.
    - Default, where supported, restores the parameter's default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
description:
description:
- Description of the NVE interface.
required: false
default: null
host_reachability:
description:
- Specify mechanism for host reachability advertisement.
required: false
choices: ['true', 'false']
default: null
shutdown:
description:
- Administratively shutdown the NVE interface.
required: false
choices: ['true','false']
default: false
source_interface:
description:
- Specify the loopback interface whose IP address should be
used for the NVE interface.
required: false
default: null
source_interface_hold_down_time:
description:
- Suppresses advertisement of the NVE loopback address until
the overlay has converged.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vxlan_vtep:
interface: nve1
description: default
host_reachability: default
source_interface: Loopback0
source_interface_hold_down_time: 30
shutdown: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"description": "simple description", "host_reachability": true,
"interface": "nve1", "shutdown": true, "source_interface": "loopback0",
"source_interface_hold_down_time": "30"}
existing:
description: k/v pairs of existing VXLAN VTEP configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of VXLAN VTEP configuration after module execution
returned: verbose mode
type: dict
sample: {"description": "simple description", "host_reachability": true,
"interface": "nve1", "shutdown": true, "source_interface": "loopback0",
"source_interface_hold_down_time": "30"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "source-interface loopback0",
"source-interface hold-down-time 30", "description simple description",
"shutdown", "host-reachability protocol bgp"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
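# Illustrative sketch (assumption, not in the original module): the add() method
# above is how candidate configs are built by the state handlers further below.
#     candidate = CustomNetworkConfig(indent=3)
#     candidate.add(['source-interface loopback0'], parents=['interface nve1'])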
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = [
'shutdown',
'host_reachability'
]
PARAM_TO_COMMAND_KEYMAP = {
'description': 'description',
'host_reachability': 'host-reachability protocol bgp',
'interface': 'interface',
'shutdown': 'shutdown',
'source_interface': 'source-interface',
'source_interface_hold_down_time': 'source-interface hold-down-time'
}
PARAM_TO_DEFAULT_KEYMAP = {
'description': False,
'shutdown': True,
}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M)
value = False
if arg == 'shutdown':
try:
if NO_SHUT_REGEX.search(config):
value = False
elif REGEX.search(config):
value = True
except TypeError:
value = False
else:
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
NO_DESC_REGEX = re.compile(r'\s+{0}\s*$'.format('no description'), re.M)
SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P<value>\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if arg == 'description':
if NO_DESC_REGEX.search(config):
value = ''
elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value').strip()
elif arg == 'source_interface':
for line in config.splitlines():
try:
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = SOURCE_INTF_REGEX.search(config).group('value').strip()
break
except AttributeError:
value = ''
else:
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value').strip()
return value
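# Example (illustrative, not from the original source): given an NVE section of
# running config containing '  source-interface loopback0' and '  shutdown',
# get_value('source_interface', config, module) returns 'loopback0' and
# get_value('shutdown', config, module) returns True.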
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
interface_string = 'interface {0}'.format(module.params['interface'].lower())
parents = [interface_string]
config = netcfg.get_section(parents)
if config:
for arg in args:
existing[arg] = get_value(arg, config, module)
existing['interface'] = module.params['interface'].lower()
else:
if interface_string in str(netcfg):
existing['interface'] = module.params['interface'].lower()
for arg in args:
existing[arg] = ''
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def fix_commands(commands, module):
source_interface_command = ''
no_source_interface_command = ''
for command in commands:
if 'no source-interface hold-down-time' in command:
pass
elif 'source-interface hold-down-time' in command:
pass
elif 'no source-interface' in command:
no_source_interface_command = command
elif 'source-interface' in command:
source_interface_command = command
if source_interface_command:
commands.pop(commands.index(source_interface_command))
commands.insert(0, source_interface_command)
if no_source_interface_command:
commands.pop(commands.index(no_source_interface_command))
commands.append(no_source_interface_command)
return commands
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
module.exit_json(commands=commands)
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
commands = fix_commands(commands, module)
parents = ['interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=parents)
else:
if not existing and module.params['interface']:
commands = ['interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=[])
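# Example (illustrative): for proposed values {'source_interface': 'loopback0',
# 'host_reachability': True} on an existing 'interface nve1', the candidate
# built above ends up containing:
#     interface nve1
#       source-interface loopback0
#       host-reachability protocol bgp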
def state_absent(module, existing, proposed, candidate):
commands = ['no interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
description=dict(required=False, type='str'),
host_reachability=dict(required=False, type='bool'),
shutdown=dict(required=False, type='bool'),
source_interface=dict(required=False, type='str'),
source_interface_hold_down_time=dict(required=False, type='str'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
state = module.params['state']
interface = module.params['interface'].lower()
args = [
'interface',
'description',
'host_reachability',
'shutdown',
'source_interface',
'source_interface_hold_down_time'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
if key in BOOL_PARAMS:
value = False
else:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not existing:
WARNINGS.append("The proposed NVE interface did not exist. "
"It's recommended to use nxos_interface to create "
"all logical interfaces.")
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,734,985,602,124,862,000 | 31.828897 | 110 | 0.577948 | false |
robotlinker/robotlinker_core | src/rosbridge_suite/rosbridge_server/src/tornado/curl_httpclient.py | 20 | 19862 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Non-blocking HTTP client implementation using pycurl."""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import logging
import pycurl
import threading
import time
from tornado import httputil
from tornado import ioloop
from tornado.log import gen_log
from tornado import stack_context
from tornado.escape import utf8, native_str
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
from tornado.util import bytes_type
try:
from io import BytesIO # py3
except ImportError:
from cStringIO import StringIO as BytesIO # py2
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [_curl_create() for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
# multi object are initialized lazily, and its destructor will
# segfault if it is destroyed without having been used. Add
# and remove a dummy handle to make sure everything is
# initialized.
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
super(CurlAsyncHTTPClient, self).close()
def fetch_impl(self, request, callback):
self._requests.append((request, callback))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
if fd in self._fds:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
# libcurl sometimes closes a socket and then opens a new
# one using the same FD without giving us a POLL_NONE in
# between. This is a problem with the epoll IOLoop,
# because the kernel can tell when a socket is closed and
# removes it from the epoll automatically, causing future
# update_handler calls to fail. Since we can't tell when
# this has happened, always use remove and re-add
# instead of update.
if fd in self._fds:
self.io_loop.remove_handler(fd)
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
self._fds[fd] = ioloop_event
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._multi.socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._multi.socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout >= 0:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
_curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
reason=info['headers'].get("X-Http-Reason", None),
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
def _curl_create():
curl = pycurl.Curl()
if gen_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
return curl
def _curl_setup_request(curl, request, buffer, headers):
curl.setopt(pycurl.URL, native_str(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
# Request headers may be either a regular dict or HTTPHeaders object
if isinstance(request.headers, httputil.HTTPHeaders):
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.get_all()])
else:
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.items()])
if request.header_callback:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: request.header_callback(native_str(line)))
else:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: _curl_header_callback(headers,
native_str(line)))
if request.streaming_callback:
write_function = request.streaming_callback
else:
write_function = buffer.write
if bytes_type is str: # py2
curl.setopt(pycurl.WRITEFUNCTION, write_function)
else: # py3
# Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
# a fork/port. That version has a bug in which it passes unicode
# strings instead of bytes to the WRITEFUNCTION. This means that
# if you use a WRITEFUNCTION (which tornado always does), you cannot
# download arbitrary binary data. This needs to be fixed in the
# ported pycurl package, but in the meantime this lambda will
# make it work for downloading (utf8) text.
curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.decompress_response:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
else:
curl.setopt(pycurl.PROXY, '')
curl.unsetopt(pycurl.PROXYUSERPWD)
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
else:
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
for o in curl_options.values():
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
# Handle curl's cryptic options for every individual HTTP method
if request.method in ("POST", "PUT"):
if request.body is None:
raise AssertionError(
'Body must not be empty for "%s" request'
% request.method)
request_buffer = BytesIO(utf8(request.body))
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
if request.method == "POST":
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
else:
curl.setopt(pycurl.INFILESIZE, len(request.body))
elif request.method == "GET":
if request.body is not None:
raise AssertionError('Body must be empty for GET request')
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
curl.setopt(pycurl.USERPWD, native_str(userpwd))
gen_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
gen_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
curl.setopt(pycurl.SSLKEY, request.client_key)
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
# header_line as returned by curl includes the end-of-line characters.
header_line = header_line.strip()
if header_line.startswith("HTTP/"):
headers.clear()
try:
(__, __, reason) = httputil.parse_response_start_line(header_line)
header_line = "X-Http-Reason: %s" % reason
except httputil.HTTPInputError:
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
gen_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
gen_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
gen_log.debug('%s %r', debug_types[debug_type], debug_msg)
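# Illustrative usage sketch (not part of the original module; the URL is an
# assumption): select this implementation and issue a single request.
#     from tornado import httpclient, ioloop
#     httpclient.AsyncHTTPClient.configure(
#         "tornado.curl_httpclient.CurlAsyncHTTPClient")
#     client = httpclient.AsyncHTTPClient()
#     client.fetch("http://example.com/",
#                  callback=lambda response: ioloop.IOLoop.instance().stop())
#     ioloop.IOLoop.instance().start()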
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
| apache-2.0 | -1,064,162,552,540,069,000 | 40.639413 | 90 | 0.615296 | false |
jankeromnes/depot_tools | third_party/gsutil/oauth2_plugin/__init__.py | 51 | 1121 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Package marker file."""
| bsd-3-clause | 5,472,198,004,391,916,000 | 49.954545 | 74 | 0.769848 | false |
mcalmer/spacewalk | backend/common/rhn_pkg.py | 10 | 4128 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
from rhn.i18n import bstr
from spacewalk.common import checksum
def get_package_header(filename=None, file_obj=None, fd=None):
# pylint: disable=E1103
    need_close = False
    if filename is not None:
        stream = open(filename, mode='rb')
        need_close = True
elif file_obj is not None:
stream = file_obj
else:
stream = os.fdopen(os.dup(fd), "r")
need_close = True
if stream.name.endswith('.deb'):
packaging = 'deb'
elif stream.name.endswith('.rpm'):
packaging = 'rpm'
else:
packaging = 'mpm'
a_pkg = package_from_stream(stream, packaging)
a_pkg.read_header()
if need_close:
stream.close()
return a_pkg.header
def package_from_stream(stream, packaging):
if packaging == 'deb':
from spacewalk.common import rhn_deb
a_pkg = rhn_deb.DEB_Package(stream)
elif packaging == 'rpm':
from spacewalk.common import rhn_rpm
a_pkg = rhn_rpm.RPM_Package(stream)
elif packaging == 'mpm':
from spacewalk.common import rhn_mpm
a_pkg = rhn_mpm.MPM_Package(stream)
else:
a_pkg = None
return a_pkg
def package_from_filename(filename):
if filename.endswith('.deb'):
packaging = 'deb'
elif filename.endswith('.rpm') or filename.endswith('.hdr'):
packaging = 'rpm'
else:
packaging = 'mpm'
stream = open(filename, mode='rb')
return package_from_stream(stream, packaging)
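# Illustrative usage sketch (not part of the original module; the path is an
# assumption): read a package header, then compute the payload checksum.
#     a_pkg = package_from_filename('/tmp/example.rpm')
#     a_pkg.read_header()
#     a_pkg.payload_checksum()
#     print a_pkg.checksum_type, a_pkg.checksum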
BUFFER_SIZE = 16384
DEFAULT_CHECKSUM_TYPE = 'md5'
class A_Package:
"""virtual class that implements shared methods for RPM/MPM/DEB package object"""
# pylint: disable=R0902
def __init__(self, input_stream=None):
self.header = None
self.header_start = 0
self.header_end = 0
self.input_stream = input_stream
self.checksum_type = DEFAULT_CHECKSUM_TYPE
self.checksum = None
self.payload_stream = None
self.payload_size = None
def read_header(self):
"""reads header from self.input_file"""
pass
def save_payload(self, output_stream):
"""saves payload to output_stream"""
c_hash = checksum.getHashlibInstance(self.checksum_type, False)
if output_stream:
output_start = output_stream.tell()
self._stream_copy(self.input_stream, output_stream, c_hash)
self.checksum = c_hash.hexdigest()
if output_stream:
self.payload_stream = output_stream
self.payload_size = output_stream.tell() - output_start
def payload_checksum(self):
# just read and compute checksum
start = self.input_stream.tell()
self.save_payload(None)
self.payload_size = self.input_stream.tell() - start + self.header_end
self.payload_stream = self.input_stream
@staticmethod
def _stream_copy(source, dest, c_hash=None):
"""copies data from the source stream to the destination stream"""
while True:
buf = source.read(BUFFER_SIZE)
if not buf:
break
if dest:
dest.write(buf)
if c_hash:
c_hash.update(buf)
@staticmethod
def _read_bytes(stream, amt):
ret = bstr('')
while amt:
buf = stream.read(min(amt, BUFFER_SIZE))
if not buf:
return ret
ret = ret + buf
amt = amt - len(buf)
return ret
class InvalidPackageError(Exception):
pass
| gpl-2.0 | 2,157,320,165,874,124,800 | 29.352941 | 85 | 0.620155 | false |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/distutils/tests/test_bdist_wininst.py | 47 | 1101 | """Tests for distutils.command.bdist_wininst."""
import unittest
import os
from distutils.dist import Distribution
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
dist = Distribution()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assert_(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| apache-2.0 | -6,195,993,804,117,585,000 | 31.382353 | 67 | 0.656676 | false |
ah744/ScaffCC_RKQC | clang/tools/scan-view/Reporter.py | 65 | 8135 | """Methods for reporting bugs."""
import subprocess, sys, os
__all__ = ['ReportFailure', 'BugReport', 'getReporters']
#
class ReportFailure(Exception):
"""Generic exception for failures in bug reporting."""
def __init__(self, value):
self.value = value
# Collect information about a bug.
class BugReport:
def __init__(self, title, description, files):
self.title = title
self.description = description
self.files = files
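# Illustrative example (not part of the original file; the values are
# assumptions): a report is just a title, a description and a list of files
# to attach.
#     report = BugReport('Null dereference in foo()',
#                        'The analyzer flagged a null dereference.',
#                        ['/tmp/report-000001.html'])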
# Reporter interfaces.
import os
import email, mimetypes, smtplib
from email import encoders
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#===------------------------------------------------------------------------===#
# ReporterParameter
#===------------------------------------------------------------------------===#
class ReporterParameter:
def __init__(self, n):
self.name = n
def getName(self):
return self.name
def getValue(self,r,bugtype,getConfigOption):
return getConfigOption(r.getName(),self.getName())
def saveConfigValue(self):
return True
class TextParameter (ReporterParameter):
def getHTML(self,r,bugtype,getConfigOption):
return """\
<tr>
<td class="form_clabel">%s:</td>
<td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
class SelectionParameter (ReporterParameter):
def __init__(self, n, values):
ReporterParameter.__init__(self,n)
self.values = values
def getHTML(self,r,bugtype,getConfigOption):
default = self.getValue(r,bugtype,getConfigOption)
return """\
<tr>
<td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
%s
</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
<option value="%s"%s>%s</option>"""%(o[0],
o[0] == default and ' selected="selected"' or '',
o[1]) for o in self.values]))
#===------------------------------------------------------------------------===#
# Reporters
#===------------------------------------------------------------------------===#
class EmailReporter:
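    # Builds a multipart message (summary plus one attachment per report file)
    # and sends it through the configured SMTP server/port.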
def getName(self):
return 'Email'
def getParameters(self):
return map(lambda x:TextParameter(x),['To', 'From', 'SMTP Server', 'SMTP Port'])
# Lifted from python email module examples.
def attachFile(self, outer, path):
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(path)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(path, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
outer.attach(msg)
def fileReport(self, report, parameters):
mainMsg = """\
BUG REPORT
---
Title: %s
Description: %s
"""%(report.title, report.description)
if not parameters.get('To'):
raise ReportFailure('No "To" address specified.')
if not parameters.get('From'):
raise ReportFailure('No "From" address specified.')
msg = MIMEMultipart()
msg['Subject'] = 'BUG REPORT: %s'%(report.title)
# FIXME: Get config parameters
msg['To'] = parameters.get('To')
msg['From'] = parameters.get('From')
msg.preamble = mainMsg
        msg.attach(MIMEText(mainMsg, _subtype='plain'))
for file in report.files:
self.attachFile(msg, file)
try:
s = smtplib.SMTP(host=parameters.get('SMTP Server'),
port=parameters.get('SMTP Port'))
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.close()
except:
raise ReportFailure('Unable to send message via SMTP.')
return "Message sent!"
class BugzillaReporter:
def getName(self):
return 'Bugzilla'
def getParameters(self):
return map(lambda x:TextParameter(x),['URL','Product'])
def fileReport(self, report, parameters):
raise NotImplementedError
class RadarClassificationParameter(SelectionParameter):
def __init__(self):
SelectionParameter.__init__(self,"Classification",
[['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
['3', 'Performance'], ['4', 'UI/Usability'],
['6', 'Serious Bug'], ['7', 'Other']])
def saveConfigValue(self):
return False
def getValue(self,r,bugtype,getConfigOption):
if bugtype.find("leak") != -1:
return '3'
elif bugtype.find("dereference") != -1:
return '2'
elif bugtype.find("missing ivar release") != -1:
return '3'
else:
return '7'
class RadarReporter:
@staticmethod
def isAvailable():
# FIXME: Find this .scpt better
path = os.path.join(os.path.dirname(__file__),'Resources/GetRadarVersion.scpt')
try:
p = subprocess.Popen(['osascript',path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
return False
data,err = p.communicate()
res = p.wait()
# FIXME: Check version? Check for no errors?
return res == 0
def getName(self):
return 'Radar'
def getParameters(self):
return [ TextParameter('Component'), TextParameter('Component Version'),
RadarClassificationParameter() ]
def fileReport(self, report, parameters):
component = parameters.get('Component', '')
componentVersion = parameters.get('Component Version', '')
classification = parameters.get('Classification', '')
personID = ""
diagnosis = ""
config = ""
if not component.strip():
component = 'Bugs found by clang Analyzer'
if not componentVersion.strip():
componentVersion = 'X'
script = os.path.join(os.path.dirname(__file__),'Resources/FileRadar.scpt')
args = ['osascript', script, component, componentVersion, classification, personID, report.title,
report.description, diagnosis, config] + map(os.path.abspath, report.files)
# print >>sys.stderr, args
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
raise ReportFailure("Unable to file radar (AppleScript failure).")
data, err = p.communicate()
res = p.wait()
if res:
raise ReportFailure("Unable to file radar (AppleScript failure).")
try:
values = eval(data)
except:
raise ReportFailure("Unable to process radar results.")
# We expect (int: bugID, str: message)
if len(values) != 2 or not isinstance(values[0], int):
raise ReportFailure("Unable to process radar results.")
bugID,message = values
bugID = int(bugID)
if not bugID:
raise ReportFailure(message)
return "Filed: <a href=\"rdar://%d/\">%d</a>"%(bugID,bugID)
###
def getReporters():
reporters = []
if RadarReporter.isAvailable():
reporters.append(RadarReporter())
reporters.append(EmailReporter())
return reporters
| bsd-2-clause | -3,733,677,169,329,846,000 | 31.802419 | 105 | 0.579226 | false |
varlog00/Sigil | src/Resource_Files/plugin_launchers/python/sigil_gumboc_tags.py | 6 | 4774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals
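# Tag name constants covering the HTML, SVG and MathML elements known to the
# Gumbo parser bindings.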
TagNames = [
"A",
"ABBR",
"ACRONYM",
"ADDRESS",
"ALTGLYPH",
"ALTGLYPHDEF",
"ALTGLYPHITEM",
"ANIMATE",
"ANIMATECOLOR",
"ANIMATEMOTION",
"ANIMATETRANSFORM",
"ANNOTATION_XML",
"APPLET",
"AREA",
"ARTICLE",
"ASIDE",
"AUDIO",
"B",
"BASE",
"BASEFONT",
"BDI",
"BDO",
"BGSOUND",
"BIG",
"BLINK",
"BLOCKQUOTE",
"BODY",
"BR",
"BUTTON",
"CANVAS",
"CAPTION",
"CENTER",
"CIRCLE",
"CITE",
"CLIPPATH",
"CODE",
"COL",
"COLGROUP",
"COLOR_PROFILE",
"CURSOR",
"DATA",
"DATALIST",
"DD",
"DEFS",
"DEL",
"DESC",
"DETAILS",
"DFN",
"DIR",
"DIV",
"DL",
"DT",
"ELLIPSE",
"EM",
"EMBED",
"FEBLEND",
"FECOLORMATRIX",
"FECOMPONENTTRANSFER",
"FECOMPOSITE",
"FECONVOLVEMATRIX",
"FEDIFFUSELIGHTING",
"FEDISPLACEMENTMAP",
"FEDISTANTLIGHT",
"FEFLOOD",
"FEFUNCA",
"FEFUNCB",
"FEFUNCG",
"FEFUNCR",
"FEGAUSSIANBLUR",
"FEIMAGE",
"FEMERGE",
"FEMERGENODE",
"FEMORPHOLOGY",
"FEOFFSET",
"FEPOINTLIGHT",
"FESPECTACTUALRLIGHTING",
"FESPOTLIGHT",
"FETILE",
"FETURBULENCE",
"FIELDSET",
"FIGCAPTION",
"FIGURE",
"FILTER",
"FONT",
"FONT_FACE",
"FONT_FACE_FORMAT",
"FONT_FACE_NAME",
"FONT_FACE_SRC",
"FONT_FACE_URI",
"FOOTER",
"FOREIGNOBJECT",
"FORM",
"FRAME",
"FRAMESET",
"G",
"GLYPH",
"GLYPHREF",
"H1",
"H2",
"H3",
"H4",
"H5",
"H6",
"HEAD",
"HEADER",
"HGROUP",
"HKERN",
"HR",
"HTML",
"I",
"IFRAME",
"IMAGE",
"IMG",
"INPUT",
"INS",
"ISINDEX",
"KBD",
"KEYGEN",
"LABEL",
"LEGEND",
"LI",
"LINE",
"LINEARGRADIENT",
"LINK",
"LISTING",
"MACTION",
"MAIN",
"MALIGNGROUP",
"MALIGNMARK",
"MAP",
"MARK",
"MARKER",
"MARQUEE",
"MASK",
"MATH",
"MENCLOSE",
"MENU",
"MENUITEM",
"MERROR",
"META",
"METADATA",
"METER",
"MFENCED",
"MFRAC",
"MGLYPH",
"MI",
"MISSING_GLYPH",
"MLABELEDTR",
"MLONGDIV",
"MMULTISCRIPTS",
"MN",
"MO",
"MOVER",
"MPADDED",
"MPATH",
"MPHANTOM",
"MPRESCRIPTS",
"MROOT",
"MROW",
"MS",
"MSCARRIES",
"MSCARRY",
"MSGROUP",
"MSLINE",
"MSPACE",
"MSQRT",
"MSROW",
"MSTACK",
"MSTYLE",
"MSUB",
"MSUBSUP",
"MSUP",
"MTABLE",
"MTD",
"MTEXT",
"MTR",
"MULTICOL",
"MUNDER",
"MUNDEROVER",
"NAV",
"NEXTID",
"NOBR",
"NOEMBED",
"NOFRAMES",
"NONE",
"NOSCRIPT",
"OBJECT",
"OL",
"OPTGROUP",
"OPTION",
"OUTPUT",
"P",
"PARAM",
"PATH",
"PATTERN",
"PLAINTEXT",
"POLYGON",
"POLYLINE",
"PRE",
"PROGRESS",
"Q",
"RADIALGRADIENT",
"RB",
"RECT",
"RP",
"RT",
"RTC",
"RUBY",
"S",
"SAMP",
"SCRIPT",
"SECTION",
"SELECT",
"SEMANTICS",
"SET",
"SMALL",
"SOURCE",
"SPACER",
"SPAN",
"STOP",
"STRIKE",
"STRONG",
"STYLE",
"SUB",
"SUMMARY",
"SUP",
"SVG",
"SWITCH",
"SYMBOL",
"TABLE",
"TBODY",
"TD",
"TEMPLATE",
"TEXT",
"TEXTPATH",
"TEXTAREA",
"TFOOT",
"TH",
"THEAD",
"TIME",
"TITLE",
"TR",
"TRACK",
"TREF",
"TSPAN",
"TT",
"U",
"UL",
"USE",
"VAR",
"VIDEO",
"VIEW",
"VKERN",
"WBR",
"XMP",
]
| gpl-3.0 | -3,132,407,543,923,527,700 | 17.152091 | 48 | 0.346041 | false |
chamikaramj/beam | sdks/python/apache_beam/transforms/write_ptransform_test.py | 9 | 4178 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the write transform."""
import logging
import unittest
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.util import assert_that, is_empty
class _TestSink(iobase.Sink):
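  # Test double for iobase.Sink: records the init result and writer results that
  # the Write transform hands back at finalize time.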
TEST_INIT_RESULT = 'test_init_result'
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
def initialize_write(self):
if self.return_init_result:
return _TestSink.TEST_INIT_RESULT
def finalize_write(self, init_result, writer_results):
self.init_result_at_finalize = init_result
self.write_results_at_finalize = writer_results
def open_writer(self, init_result, uid):
writer = _TestWriter(init_result, uid, self.return_write_results)
return writer
class _TestWriter(iobase.Writer):
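  # Writer double that tracks its lifecycle (unstarted -> written -> closed) and
  # collects every written value in write_output.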
STATE_UNSTARTED, STATE_WRITTEN, STATE_CLOSED = 0, 1, 2
TEST_WRITE_RESULT = 'test_write_result'
def __init__(self, init_result, uid, return_write_results=True):
self.state = _TestWriter.STATE_UNSTARTED
self.init_result = init_result
self.uid = uid
self.write_output = []
self.return_write_results = return_write_results
def close(self):
assert self.state in (
_TestWriter.STATE_WRITTEN, _TestWriter.STATE_UNSTARTED)
self.state = _TestWriter.STATE_CLOSED
if self.return_write_results:
return _TestWriter.TEST_WRITE_RESULT
def write(self, value):
if self.write_output:
assert self.state == _TestWriter.STATE_WRITTEN
else:
assert self.state == _TestWriter.STATE_UNSTARTED
self.state = _TestWriter.STATE_WRITTEN
self.write_output.append(value)
class WriteToTestSink(PTransform):
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
self.last_sink = None
self.label = 'write_to_test_sink'
def expand(self, pcoll):
self.last_sink = _TestSink(return_init_result=self.return_init_result,
return_write_results=self.return_write_results)
return pcoll | beam.io.Write(self.last_sink)
class WriteTest(unittest.TestCase):
DATA = ['some data', 'more data', 'another data', 'yet another data']
def _run_write_test(self,
data,
return_init_result=True,
return_write_results=True):
write_to_test_sink = WriteToTestSink(return_init_result,
return_write_results)
p = TestPipeline()
result = p | beam.Create(data) | write_to_test_sink | beam.Map(list)
assert_that(result, is_empty())
p.run()
sink = write_to_test_sink.last_sink
self.assertIsNotNone(sink)
def test_write(self):
self._run_write_test(WriteTest.DATA)
def test_write_with_empty_pcollection(self):
data = []
self._run_write_test(data)
def test_write_no_init_result(self):
self._run_write_test(WriteTest.DATA, return_init_result=False)
def test_write_no_write_results(self):
self._run_write_test(WriteTest.DATA, return_write_results=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | 8,816,244,651,456,138,000 | 32.15873 | 78 | 0.700096 | false |
rjsproxy/wagtail | wagtail/wagtailadmin/tests/test_widgets.py | 10 | 2561 | from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from wagtail.wagtailadmin import widgets
from wagtail.wagtailcore.models import Page
from wagtail.tests.testapp.models import SimplePage, EventPage
class TestAdminPageChooserWidget(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage(
title="foobarbaz",
slug="foobarbaz",
)
self.root_page.add_child(instance=self.child_page)
def test_render_html(self):
widget = widgets.AdminPageChooser()
html = widget.render_html('test', None, {})
self.assertIn("<input name=\"test\" type=\"hidden\" />", html)
def test_render_js_init(self):
widget = widgets.AdminPageChooser()
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"wagtailcore.page\"], null);")
def test_render_html_with_value(self):
widget = widgets.AdminPageChooser()
html = widget.render_html('test', self.child_page, {})
self.assertIn("<input name=\"test\" type=\"hidden\" value=\"%d\" />" % self.child_page.id, html)
def test_render_js_init_with_value(self):
widget = widgets.AdminPageChooser()
js_init = widget.render_js_init('test-id', 'test', self.child_page)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"wagtailcore.page\"], %d);" % self.root_page.id)
# def test_render_html_init_with_content_type omitted as HTML does not
# change when selecting a content type
def test_render_js_init_with_content_type(self):
content_type = ContentType.objects.get_for_model(SimplePage)
widget = widgets.AdminPageChooser(content_type=content_type)
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"tests.simplepage\"], null);")
def test_render_js_init_with_multiple_content_types(self):
content_types = [
# Not using get_for_models as we need deterministic ordering
ContentType.objects.get_for_model(SimplePage),
ContentType.objects.get_for_model(EventPage),
]
widget = widgets.AdminPageChooser(content_type=content_types)
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"tests.simplepage\", \"tests.eventpage\"], null);")
| bsd-3-clause | 4,118,165,669,624,323,000 | 39.015625 | 119 | 0.655213 | false |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/importlib/test/extension/test_case_sensitivity.py | 50 | 1152 | import sys
from test import support
import unittest
from importlib import _bootstrap
from .. import util
from . import util as ext_util
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(unittest.TestCase):
def find_module(self):
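        # Look up the extension module under a deliberately wrong-cased name so
        # the test can observe whether the finder honours case sensitivity.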
good_name = ext_util.NAME
bad_name = good_name.upper()
assert good_name != bad_name
finder = _bootstrap._FileFinder(ext_util.PATH,
_bootstrap._ExtensionFinderDetails())
return finder.find_module(bad_name)
def test_case_sensitive(self):
with support.EnvironmentVarGuard() as env:
env.unset('PYTHONCASEOK')
loader = self.find_module()
self.assertIsNone(loader)
def test_case_insensitivity(self):
with support.EnvironmentVarGuard() as env:
env.set('PYTHONCASEOK', '1')
loader = self.find_module()
self.assertTrue(hasattr(loader, 'load_module'))
def test_main():
if ext_util.FILENAME is None:
return
support.run_unittest(ExtensionModuleCaseSensitivityTest)
if __name__ == '__main__':
test_main()
| apache-2.0 | -7,427,643,396,477,557,000 | 26.428571 | 77 | 0.634549 | false |
nathanielvarona/airflow | airflow/migrations/versions/4446e08588_dagrun_start_end.py | 8 | 1400 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
op.add_column('dag_run', sa.Column('end_date', sa.DateTime(), nullable=True))
op.add_column('dag_run', sa.Column('start_date', sa.DateTime(), nullable=True))
def downgrade(): # noqa: D103
op.drop_column('dag_run', 'start_date')
op.drop_column('dag_run', 'end_date')
| apache-2.0 | 4,143,054,880,276,355,600 | 30.818182 | 83 | 0.737143 | false |
Dhivyap/ansible | test/units/modules/storage/netapp/test_na_ontap_vscan_scanner_pool.py | 38 | 6534 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_vscan_scanner_pool '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vscan_scanner_pool \
import NetAppOntapVscanScannerPool as scanner_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'scanner':
xml = self.build_scanner_pool_info(self.params)
self.xml_out = xml
return xml
@staticmethod
    def build_scanner_pool_info(scanner_details):
        xml = netapp_utils.zapi.NaElement('xml')
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'vscan-scanner-pool-info': {
                    'scanner-pool': scanner_details['scanner_pool'],
                    'scanner-policy': scanner_details['scanner_policy']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
    ''' Unit tests for na_ontap_vscan_scanner_pool '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_scanner = {
'state': 'present',
'scanner_pool': 'test_pool',
'vserver': 'test_vserver',
'hostnames': ['host1', 'host2'],
'privileged_users': ['domain\\admin', 'domain\\carchi8py'],
'scanner_policy': 'primary'
}
def mock_args(self):
return {
'state': self.mock_scanner['state'],
'scanner_pool': self.mock_scanner['scanner_pool'],
'vserver': self.mock_scanner['vserver'],
'hostnames': self.mock_scanner['hostnames'],
'privileged_users': self.mock_scanner['privileged_users'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'scanner_policy': self.mock_scanner['scanner_policy']
}
def get_scanner_mock_object(self, kind=None):
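        # Build the module object with a mocked ZAPI connection; kind='scanner'
        # makes the mock return a populated scanner-pool record.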
scanner_obj = scanner_module()
scanner_obj.asup_log_for_cserver = Mock(return_value=None)
if kind is None:
scanner_obj.server = MockONTAPConnection()
else:
scanner_obj.server = MockONTAPConnection(kind='scanner', data=self.mock_scanner)
return scanner_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
scanner_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_scanner(self):
        ''' Test if get_scanner_pool returns None for a non-existent scanner pool '''
set_module_args(self.mock_args())
result = self.get_scanner_mock_object().get_scanner_pool()
assert not result
def test_get_existing_scanner(self):
        ''' Test if get_scanner_pool returns details of an existing scanner pool '''
set_module_args(self.mock_args())
result = self.get_scanner_mock_object('scanner').get_scanner_pool()
assert result
def test_successfully_create(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert not exc.value.args[0]['changed']
def test_apply_policy(self):
data = self.mock_args()
data['scanner_policy'] = 'secondary'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert exc.value.args[0]['changed']
def test_successfully_delete(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object().apply()
assert not exc.value.args[0]['changed']
| gpl-3.0 | 4,656,287,166,837,647,000 | 35.502793 | 92 | 0.616927 | false |
ReachingOut/unisubs | bin/update-integration.py | 4 | 2255 | #!/usr/bin/env python
import optparse
import os
import sys
import subprocess
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
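# Each file under optional/ is named after an integration repo and contains the
# commit id that the checkout should be pinned to.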
def optional_dir():
return os.path.join(root_dir, 'optional')
def repo_dir(repo_name):
return os.path.join(root_dir, repo_name)
def get_repo_names():
return [f for f in os.listdir(optional_dir())
if not f.startswith(".")]
def get_repo_commit(repo_name):
path = os.path.join(optional_dir(), repo_name)
return open(path).read().strip()
def run_command(*args):
subprocess.check_call(args, stdout=open(os.devnull, 'w'))
def run_git_clone(repo_name):
os.chdir(root_dir)
url = "[email protected]:pculture/{0}.git".format(repo_name)
print "{0}: cloning".format(repo_name)
run_command("git", "clone", url)
commit_id = get_repo_commit(repo_name)
os.chdir(repo_dir(repo_name))
print "{0}: reset to {1}".format(repo_name, commit_id)
run_command("git", "reset", "--hard", commit_id)
def run_git_reset(repo_name, skip_fetch):
os.chdir(repo_dir(repo_name))
if not skip_fetch:
print "{0}: fetching".format(repo_name)
run_command("git", "fetch", "origin")
else:
print "{0}: skipping fetch".format(repo_name)
commit_id = get_repo_commit(repo_name)
print "{0} reset to {1}".format(repo_name, commit_id)
run_command("git", "reset", "--hard", commit_id)
def make_option_parser():
parser = optparse.OptionParser()
parser.add_option("--skip-fetch'", dest="skip_fetch",
action='store_true', help="don't run git fetch")
parser.add_option("--clone-missing", dest="clone_missing",
action='store_true', help="clone missing repositories")
return parser
def main(argv):
parser = make_option_parser()
(options, args) = parser.parse_args(argv)
for repo_name in get_repo_names():
if os.path.exists(repo_dir(repo_name)):
run_git_reset(repo_name, options.skip_fetch)
elif options.clone_missing:
run_git_clone(repo_name)
else:
print ("{0}: directory doesn't exist use --clone-missing "
"to create it".format(repo_name))
if __name__ == '__main__':
main(sys.argv)
| agpl-3.0 | -3,270,821,882,048,445,000 | 31.681159 | 77 | 0.621286 | false |
benschulz/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py | 499 | 1859 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_do_extra_handshake signature.
"""
def no_web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/wrong_handshake_sig_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 | -4,693,898,114,739,469,000 | 40.311111 | 72 | 0.759548 | false |
tensorflow/agents | tf_agents/policies/random_tf_policy.py | 1 | 7986 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy implementation that generates random actions."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import cast
import tensorflow as tf
from tf_agents.distributions import masked
from tf_agents.policies import tf_policy
from tf_agents.policies import utils as policy_utilities
from tf_agents.specs import bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import nest_utils
def _calculate_log_probability(outer_dims, action_spec):
"""Helper function for calculating log prob of a uniform distribution.
Each item in the returned tensor will be equal to:
|action_spec.shape| * log_prob_of_each_component_of_action_spec.
Note that this method expects the same value for all outer_dims because
we're sampling uniformly from the same distribution for each batch row.
Args:
outer_dims: TensorShape.
action_spec: BoundedTensorSpec.
Returns:
A tensor of type float32 with shape outer_dims.
"""
# Equivalent of what a tfp.distribution.Categorical would return.
if action_spec.dtype.is_integer:
log_prob = -tf.math.log(action_spec.maximum - action_spec.minimum + 1.0)
# Equivalent of what a tfp.distribution.Uniform would return.
else:
log_prob = -tf.math.log(action_spec.maximum - action_spec.minimum)
# Note that log_prob may be a vector. We first reduce it to a scalar, and then
# adjust by the number of times that vector is repeated in action_spec.
log_prob = tf.reduce_sum(log_prob) * (
action_spec.shape.num_elements() / log_prob.shape.num_elements())
# Regardless of the type of the action, the log_prob should be float32.
return tf.cast(tf.fill(outer_dims, log_prob), tf.float32)
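  # Worked example: for a scalar integer spec with minimum=0 and maximum=3 the
  # uniform log-probability is -log(4) ~= -1.386, broadcast to shape outer_dims.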
# TODO(b/161005095): Refactor into RandomTFPolicy and RandomBanditTFPolicy.
class RandomTFPolicy(tf_policy.TFPolicy):
"""Returns random samples of the given action_spec.
Note: the values in the info_spec (except for the log_probability) are random
values that have nothing to do with the emitted actions.
Note: The returned info.log_probabiliy will be an object matching the
structure of action_spec, where each value is a tensor of size [batch_size].
"""
def __init__(self, time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec, *args, **kwargs):
observation_and_action_constraint_splitter = (
kwargs.get('observation_and_action_constraint_splitter', None))
self._accepts_per_arm_features = (
kwargs.pop('accepts_per_arm_features', False))
if observation_and_action_constraint_splitter is not None:
if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
raise NotImplementedError(
'RandomTFPolicy only supports action constraints for '
'BoundedTensorSpec action specs.')
action_spec = tensor_spec.from_spec(action_spec)
action_spec = cast(tensor_spec.BoundedTensorSpec, action_spec)
scalar_shape = action_spec.shape.rank == 0
single_dim_shape = (
action_spec.shape.rank == 1 and action_spec.shape.dims == [1])
if not scalar_shape and not single_dim_shape:
raise NotImplementedError(
'RandomTFPolicy only supports action constraints for action specs '
'shaped as () or (1,) or their equivalent list forms.')
super(RandomTFPolicy, self).__init__(time_step_spec, action_spec, *args,
**kwargs)
def _variables(self):
return []
def _action(self, time_step, policy_state, seed):
observation_and_action_constraint_splitter = (
self.observation_and_action_constraint_splitter)
outer_dims = nest_utils.get_outer_shape(time_step, self._time_step_spec)
if observation_and_action_constraint_splitter is not None:
observation, mask = observation_and_action_constraint_splitter(
time_step.observation)
action_spec = tensor_spec.from_spec(self.action_spec)
action_spec = cast(tensor_spec.BoundedTensorSpec, action_spec)
zero_logits = tf.cast(tf.zeros_like(mask), tf.float32)
masked_categorical = masked.MaskedCategorical(zero_logits, mask)
action_ = tf.cast(masked_categorical.sample() + action_spec.minimum,
action_spec.dtype)
# If the action spec says each action should be shaped (1,), add another
# dimension so the final shape is (B, 1) rather than (B,).
if action_spec.shape.rank == 1:
action_ = tf.expand_dims(action_, axis=-1)
policy_info = tensor_spec.sample_spec_nest(
self._info_spec, outer_dims=outer_dims)
else:
observation = time_step.observation
action_spec = cast(tensor_spec.BoundedTensorSpec, self.action_spec)
if self._accepts_per_arm_features:
max_num_arms = action_spec.maximum - action_spec.minimum + 1
batch_size = tf.shape(time_step.step_type)[0]
num_actions = observation.get(
bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY,
tf.ones(shape=(batch_size,), dtype=tf.int32) * max_num_arms)
mask = tf.sequence_mask(num_actions, max_num_arms)
zero_logits = tf.cast(tf.zeros_like(mask), tf.float32)
masked_categorical = masked.MaskedCategorical(zero_logits, mask)
action_ = tf.nest.map_structure(
lambda t: tf.cast(masked_categorical.sample() + t.minimum, t.dtype),
action_spec)
else:
action_ = tensor_spec.sample_spec_nest(
self._action_spec, seed=seed, outer_dims=outer_dims)
policy_info = tensor_spec.sample_spec_nest(
self._info_spec, outer_dims=outer_dims)
# Update policy info with chosen arm features.
if self._accepts_per_arm_features:
def _gather_fn(t):
return tf.gather(params=t, indices=action_, batch_dims=1)
chosen_arm_features = tf.nest.map_structure(
_gather_fn, observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
if policy_utilities.has_chosen_arm_features(self._info_spec):
policy_info = policy_info._replace(
chosen_arm_features=chosen_arm_features)
# TODO(b/78181147): Investigate why this control dependency is required.
if time_step is not None:
with tf.control_dependencies(tf.nest.flatten(time_step)):
action_ = tf.nest.map_structure(tf.identity, action_)
if self.emit_log_probability:
if (self._accepts_per_arm_features
or observation_and_action_constraint_splitter is not None):
action_spec = cast(tensor_spec.BoundedTensorSpec, self.action_spec)
log_probability = masked_categorical.log_prob(
action_ - action_spec.minimum)
else:
log_probability = tf.nest.map_structure(
lambda s: _calculate_log_probability(outer_dims, s),
self._action_spec)
policy_info = policy_step.set_log_probability(policy_info,
log_probability)
step = policy_step.PolicyStep(action_, policy_state, policy_info)
return step
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'RandomTFPolicy does not support distributions yet.')
| apache-2.0 | -3,484,737,187,316,047,400 | 41.705882 | 80 | 0.690834 | false |
jordiclariana/ansible | lib/ansible/module_utils/asa.py | 14 | 4321 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Peter Sprygada, <[email protected]>
# Copyright (c) 2016 Patrick Ogenstad, <@ogenstad>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils.network import NetworkError, NetworkModule
from ansible.module_utils.network import add_argument, register_transport
from ansible.module_utils.network import to_list
from ansible.module_utils.shell import CliBase
from ansible.module_utils.netcli import Command
add_argument('context', dict(required=False))
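# Extra provider argument: an optional ASA security context. When supplied, the
# CLI session issues the matching 'changeto' command right after connecting.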
class Cli(CliBase):
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"error:", re.I),
]
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
def __init__(self, *args, **kwargs):
super(Cli, self).__init__(*args, **kwargs)
def connect(self, params, **kwargs):
super(Cli, self).connect(params, kickstart=False, **kwargs)
if params['context']:
self.change_context(params, **kwargs)
def authorize(self, params, **kwargs):
passwd = params['auth_pass']
errors = self.shell.errors
# Disable errors (if already in enable mode)
self.shell.errors = []
cmd = Command('enable', prompt=self.NET_PASSWD_RE, response=passwd)
self.execute([cmd, 'no terminal pager'])
# Reapply error handling
self.shell.errors = errors
def change_context(self, params):
context = params['context']
if context == 'system':
command = 'changeto system'
else:
command = 'changeto context %s' % context
self.execute(command)
### Config methods ###
def configure(self, commands):
cmds = ['configure terminal']
cmds.extend(to_list(commands))
if cmds[-1] == 'exit':
cmds[-1] = 'end'
elif cmds[-1] != 'end':
cmds.append('end')
responses = self.execute(cmds)
return responses[1:]
def get_config(self, include=None):
if include not in [None, 'defaults', 'passwords']:
raise ValueError('include must be one of None, defaults, passwords')
cmd = 'show running-config'
if include == 'passwords':
cmd = 'more system:running-config'
elif include == 'defaults':
cmd = 'show running-config all'
else:
cmd = 'show running-config'
return self.run_commands(cmd)[0]
def load_config(self, commands):
return self.configure(commands)
def save_config(self):
self.execute(['write memory'])
Cli = register_transport('cli', default=True)(Cli)
| gpl-3.0 | 5,460,363,249,954,310,000 | 37.580357 | 92 | 0.65957 | false |
SciTools/mo_pack | lib/mo_pack/tests/test_wgdos.py | 3 | 3859 | # (C) British Crown Copyright 2015, Met Office
#
# This file is part of mo_pack.
#
# mo_pack is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mo_pack is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with mo_pack. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the `mo_pack.compress_wgdos` and `mo_pack.decompress_wgdos`
functions.
"""
from __future__ import absolute_import, division, print_function
import os
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import mo_pack
class TestPackWGDOS(unittest.TestCase):
def assert_equal_when_decompressed(self, compressed_data, expected_array,
mdi=0):
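        # Round-trip helper: decompress the packed buffer and require an exact
        # match against the expected array.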
x, y = expected_array.shape
decompressed_data = mo_pack.decompress_wgdos(
compressed_data, x, y, mdi)
np.testing.assert_array_equal(decompressed_data, expected_array)
def test_pack_wgdos(self):
data = np.arange(42, dtype=np.float32).reshape(7, 6)
compressed_data = mo_pack.compress_wgdos(data)
self.assert_equal_when_decompressed(compressed_data, data)
def test_mdi(self):
data = np.arange(12, dtype=np.float32).reshape(3, 4)
compressed_data = mo_pack.compress_wgdos(data,
missing_data_indicator=4.0)
        # arange data already contains 4.0 at [1, 0]; that value acts as the
        # missing-data indicator and should round-trip unchanged.
        data[1, 0] = 4.0
        self.assert_equal_when_decompressed(compressed_data, data, mdi=4.0)
def test_accuracy(self):
data = np.array([[0.1234, 0.2345, 0.3456], [0.4567, 0.5678, 0.6789]],
dtype=np.float32)
compressed = mo_pack.compress_wgdos(data, accuracy=-4)
decompressed_data = mo_pack.decompress_wgdos(compressed, 2, 3)
expected = np.array([[0.12340003, 0.18590003, 0.34560001],
[0.40810001, 0.56779999, 0.63029999]],
dtype=np.float32)
np.testing.assert_array_equal(decompressed_data, expected)
class TestdecompressWGDOS(unittest.TestCase):
def test_incorrect_size(self):
data = np.arange(77, dtype=np.float32).reshape(7, 11)
compressed_data = mo_pack.compress_wgdos(data)
with self.assertRaises(ValueError):
decompressed_data = mo_pack.decompress_wgdos(compressed_data, 5, 6)
def test_different_shape(self):
data = np.arange(24, dtype=np.float32).reshape(8, 3)
compressed_data = mo_pack.compress_wgdos(data)
decompressed_data = mo_pack.decompress_wgdos(compressed_data, 4, 6)
np.testing.assert_array_equal(decompressed_data, data.reshape(4, 6))
def test_real_data(self):
test_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(test_dir, 'test_data',
'nae.20100104-06_0001_0001.pp')
with open(fname, 'rb') as fh:
fh.seek(268)
data = mo_pack.decompress_wgdos(fh.read(339464), 360, 600)
assert_almost_equal(data.mean(), 130.84694, decimal=1)
expected = [[388.78125, 389.46875, 384.0625, 388.46875],
[388.09375, 381.375, 374.28125, 374.875],
[382.34375, 373.671875, 371.171875, 368.25],
[385.265625, 373.921875, 368.5, 365.3125]]
assert_array_equal(data[:4, :4], expected)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,593,516,665,692,684,000 | 39.621053 | 79 | 0.63462 | false |
NewCell/Call-Text-v1 | jni/pjsip/sources/tests/cdash/cfg_site_sample.py | 107 | 1530 | #
# cfg_site_sample.py - Sample site configuration
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import builder
# Your site name
SITE_NAME="Newham3"
# The URL where tests will be submitted to
URL = "http://192.168.0.2/dash/submit.php?project=PJSIP"
# Test group
GROUP = "Experimental"
# PJSIP base directory
BASE_DIR = "/root/project/pjproject"
# List of additional ccdash options
#OPTIONS = ["-o", "out.xml", "-y"]
OPTIONS = []
# What's the content of config_site.h
CONFIG_SITE = ""
# What's the content of user.mak
USER_MAK = ""
# List of regular expression of test patterns to be excluded
EXCLUDE = []
# List of regular expression of test patterns to be included (even
# if they match EXCLUDE patterns)
NOT_EXCLUDE = []
#"configure", "update", "build.*make", "build", "run.py mod_run.*100_simple"]
| gpl-3.0 | 5,001,404,778,936,304,000 | 29.6 | 77 | 0.728758 | false |
Yannig/ansible-modules-core | cloud/amazon/rds.py | 7 | 40737 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take.
required: true
default: null
aliases: []
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
aliases: []
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
aliases: []
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
aliases: []
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
aliases: []
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
aliases: []
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
aliases: []
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
aliases: []
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
aliases: []
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
aliases: []
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
aliases: []
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
aliases: []
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
aliases: []
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
aliases: []
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
aliases: []
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
aliases: []
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
aliases: []
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
aliases: []
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
aliases: []
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
aliases: []
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
aliases: []
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
aliases: []
version_added: 1.5
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
aliases: []
version_added: 1.9
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
aliases: []
version_added: 1.9
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
aliases: []
version_added: 1.9
requirements:
- "python >= 2.6"
- "boto"
author: Bruce Pennypacker, Will Thames
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
'''
import sys
import time
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
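# boto.rds2 (boto >= 2.26.0) is needed for features such as tags, character sets
# and the publicly_accessible flag; without it the module falls back to boto.rds.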
class RDSException(Exception):
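    # Normalizes the different error shapes raised by boto.rds and boto.rds2 into
    # a single message/code pair.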
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError, e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound, e:
return None
except Exception, e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
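# Note on the boto.rds2 wrappers above: rds2 calls return nested JSON-style dicts, so
# each wrapper unwraps a payload shaped roughly like the following before handing the
# inner dict to RDS2DBInstance / RDS2Snapshot (illustrative sketch only; the key names
# are taken from the lookups above, the values are made up):
#
#   {'CreateDBInstanceResponse':
#       {'CreateDBInstanceResult':
#           {'DBInstance': {'DBInstanceIdentifier': 'db-example',
#                           'DBInstanceStatus': 'creating'}}}}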
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.instance.create_time,
'status' : self.status,
'availability_zone' : self.instance.availability_zone,
'backup_retention' : self.instance.backup_retention_period,
'backup_window' : self.instance.preferred_backup_window,
'maintenance_window' : self.instance.preferred_maintenance_window,
'multi_zone' : self.instance.multi_az,
'instance_type' : self.instance.instance_class,
'username' : self.instance.master_username,
'iops' : self.instance.iops
}
# Endpoint exists only if the instance is available
if self.status == 'available':
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if self.status == 'available':
d['endpoint'] = self.instance["Endpoint"]["Address"]
d['port'] = self.instance["Endpoint"]["Port"]
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot.snapshot_create_time,
'status' : self.status,
'availability_zone' : self.snapshot.availability_zone,
'instance_id' : self.snapshot.instance_id,
'instance_created' : self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot['SnapshotCreateTime'],
'status' : self.status,
'availability_zone' : self.snapshot['AvailabilityZone'],
'instance_id' : self.snapshot['DBInstanceIdentifier'],
'instance_created' : self.snapshot['InstanceCreateTime'],
'snapshot_type' : self.snapshot['SnapshotType'],
'iops' : self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
wait_timeout = module.params.get('wait_timeout') + time.time()
while wait_timeout > time.time() and resource.status != status:
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="Problem with snapshot %s" % resource.snapshot)
resource = conn.get_db_snapshot(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="Problem with instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group','port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException, e:
module.fail_json(msg="failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
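# Illustrative playbook usage of the 'create' command implemented above (parameter names
# follow the argument_spec in main(); the concrete values are made up):
#
#   - rds:
#       command: create
#       instance_name: new-database
#       db_engine: MySQL
#       size: 10
#       instance_type: db.m1.small
#       username: mysql_admin
#       password: 1nsecure
#       wait: yes
#       wait_timeout: 600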
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException, e:
module.fail_json(msg="failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException, e:
module.fail_json(msg="failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException, e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception, e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException, e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result.get_data().get('replication_source'):
changed = False
else:
try:
            result = conn.promote_read_replica(instance_name, **params)
            changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
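# Illustrative result of validate_parameters() (hypothetical input, based on the option
# maps above): with boto.rds2 available, module options such as
#
#   {'instance_type': 'db.m1.small', 'password': 's3cret', 'backup_retention': 7}
#
# are translated into the boto keyword arguments
#
#   {'db_instance_class': 'db.m1.small', 'master_user_password': 's3cret',
#    'backup_retention_period': 7}
#
# while the legacy boto.rds path would produce 'instance_class' and 'master_password' instead.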
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False),
character_set_name = dict(required=False),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.")
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | -365,942,730,339,192,900 | 38.704678 | 322 | 0.633159 | false |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/toolz/curried/__init__.py | 7 | 2615 | """
Alternate namespace for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)]
>>> list(map(lambda seq: get(0, seq), data))
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> list(map(get(0), data))
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from . import operator
from toolz import (
comp,
complement,
compose,
concat,
concatv,
count,
curry,
diff,
dissoc,
first,
flip,
frequencies,
identity,
interleave,
isdistinct,
isiterable,
juxt,
last,
memoize,
merge_sorted,
peek,
pipe,
second,
thread_first,
thread_last,
)
from .exceptions import merge, merge_with
accumulate = toolz.curry(toolz.accumulate)
assoc = toolz.curry(toolz.assoc)
assoc_in = toolz.curry(toolz.assoc_in)
cons = toolz.curry(toolz.cons)
countby = toolz.curry(toolz.countby)
do = toolz.curry(toolz.do)
drop = toolz.curry(toolz.drop)
excepts = toolz.curry(toolz.excepts)
filter = toolz.curry(toolz.filter)
get = toolz.curry(toolz.get)
get_in = toolz.curry(toolz.get_in)
groupby = toolz.curry(toolz.groupby)
interpose = toolz.curry(toolz.interpose)
itemfilter = toolz.curry(toolz.itemfilter)
itemmap = toolz.curry(toolz.itemmap)
iterate = toolz.curry(toolz.iterate)
join = toolz.curry(toolz.join)
keyfilter = toolz.curry(toolz.keyfilter)
keymap = toolz.curry(toolz.keymap)
map = toolz.curry(toolz.map)
mapcat = toolz.curry(toolz.mapcat)
nth = toolz.curry(toolz.nth)
partial = toolz.curry(toolz.partial)
partition = toolz.curry(toolz.partition)
partition_all = toolz.curry(toolz.partition_all)
partitionby = toolz.curry(toolz.partitionby)
pluck = toolz.curry(toolz.pluck)
random_sample = toolz.curry(toolz.random_sample)
reduce = toolz.curry(toolz.reduce)
reduceby = toolz.curry(toolz.reduceby)
remove = toolz.curry(toolz.remove)
sliding_window = toolz.curry(toolz.sliding_window)
sorted = toolz.curry(toolz.sorted)
tail = toolz.curry(toolz.tail)
take = toolz.curry(toolz.take)
take_nth = toolz.curry(toolz.take_nth)
topk = toolz.curry(toolz.topk)
unique = toolz.curry(toolz.unique)
update_in = toolz.curry(toolz.update_in)
valfilter = toolz.curry(toolz.valfilter)
valmap = toolz.curry(toolz.valmap)
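# Illustrative doctest-style use of the curried variants defined above (mirrors the
# module docstring; only names defined in this module are assumed):
#
#   >>> from toolz.curried import pipe, map, get, valmap
#   >>> pipe([(1, 2), (11, 22)], map(get(1)), list)
#   [2, 22]
#   >>> valmap(str)({'a': 1})
#   {'a': '1'}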
del exceptions
del toolz
| gpl-3.0 | -417,565,616,808,393,100 | 25.15 | 78 | 0.712428 | false |
TeachAtTUM/edx-platform | cms/djangoapps/contentstore/management/commands/reindex_library.py | 18 | 2454 | """ Management command to update libraries' search index """
from __future__ import print_function
from textwrap import dedent
from django.core.management import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from contentstore.courseware_index import LibrarySearchIndexer
from xmodule.modulestore.django import modulestore
from .prompt import query_yes_no
class Command(BaseCommand):
"""
Command to reindex content libraries (single, multiple or all available)
Examples:
./manage.py reindex_library lib1 lib2 - reindexes libraries with keys lib1 and lib2
./manage.py reindex_library --all - reindexes all available libraries
"""
help = dedent(__doc__)
CONFIRMATION_PROMPT = u"Reindexing all libraries might be a time consuming operation. Do you want to continue?"
def add_arguments(self, parser):
parser.add_argument('library_ids', nargs='*')
parser.add_argument(
'--all',
action='store_true',
dest='all',
help='Reindex all libraries'
)
def _parse_library_key(self, raw_value):
""" Parses library key from string """
result = CourseKey.from_string(raw_value)
if not isinstance(result, LibraryLocator):
raise CommandError(u"Argument {0} is not a library key".format(raw_value))
return result
def handle(self, *args, **options):
"""
        By convention set by django developers, this method actually executes the command's actions.
        So, there could be no better docstring than to emphasize this once again.
"""
if (not options['library_ids'] and not options['all']) or (options['library_ids'] and options['all']):
raise CommandError(u"reindex_library requires one or more <library_id>s or the --all flag.")
store = modulestore()
if options['all']:
if query_yes_no(self.CONFIRMATION_PROMPT, default="no"):
library_keys = [library.location.library_key.replace(branch=None) for library in store.get_libraries()]
else:
return
else:
library_keys = map(self._parse_library_key, options['library_ids'])
for library_key in library_keys:
print("Indexing library {}".format(library_key))
LibrarySearchIndexer.do_library_reindex(store, library_key)
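# Illustrative invocations (the library key strings are made up; their format follows
# opaque_keys' LibraryLocator serialization):
#
#   ./manage.py reindex_library "library-v1:DemoOrg+DemoLib"   # reindex a single library
#   ./manage.py reindex_library --all                          # reindex every library
#
# Passing a non-library key such as "course-v1:DemoOrg+DemoCourse+2024" causes
# _parse_library_key() to raise CommandError, since the parsed key is not a LibraryLocator.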
| agpl-3.0 | 4,093,019,509,368,758,300 | 36.753846 | 119 | 0.658517 | false |
CVML/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
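# For reference: with include_bias=False (as used below), PolynomialFeatures expands a
# single feature x into its powers, e.g. (hypothetical value)
#
#   >>> PolynomialFeatures(degree=3, include_bias=False).fit_transform([[2.0]])
#   array([[ 2.,  4.,  8.]])
#
# so each Pipeline below simply fits an ordinary linear model on the expanded features.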
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause | 3,725,141,006,832,607,700 | 37.955882 | 77 | 0.687807 | false |
danuzclaudes/robottelo | tests/foreman/ui/test_template.py | 3 | 10224 | # -*- encoding: utf-8 -*-
"""Test class for Template UI"""
from fauxfactory import gen_string
from nailgun import entities
from robottelo.constants import OS_TEMPLATE_DATA_FILE, SNIPPET_DATA_FILE
from robottelo.datafactory import generate_strings_list, invalid_values_list
from robottelo.decorators import run_only_on, tier1, tier2
from robottelo.helpers import get_data_file
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.factory import make_templates
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
OS_TEMPLATE_DATA_FILE = get_data_file(OS_TEMPLATE_DATA_FILE)
SNIPPET_DATA_FILE = get_data_file(SNIPPET_DATA_FILE)
class TemplateTestCase(UITestCase):
"""Implements Provisioning Template tests from UI"""
@classmethod
def setUpClass(cls):
super(TemplateTestCase, cls).setUpClass()
cls.organization = entities.Organization().create()
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create new template using different valid names
@Feature: Template - Positive Create
@Assert: New provisioning template of type 'provision' should be
created successfully
"""
with Session(self.browser) as session:
for name in generate_strings_list(length=8):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_name(self):
"""Create a new template with invalid names
@Feature: Template - Negative Create
@Assert: Template is not created
"""
with Session(self.browser) as session:
for name in invalid_values_list(interface='ui'):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['name_haserror']))
@run_only_on('sat')
@tier1
def test_negative_create_with_same_name(self):
"""Template - Create a new template with same name
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['name_haserror']))
@run_only_on('sat')
@tier1
def test_negative_create_without_type(self):
"""Template - Create a new template without selecting its type
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
with self.assertRaises(UIError) as context:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='',
)
self.assertEqual(
context.exception.message,
'Could not create template "{0}" without type'.format(name)
)
@run_only_on('sat')
@tier1
def test_negative_create_without_upload(self):
"""Template - Create a new template without uploading a template
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
with self.assertRaises(UIError) as context:
make_templates(
session,
name=name,
template_path='',
custom_really=True,
template_type='PXELinux',
)
self.assertEqual(
context.exception.message,
'Could not create blank template "{0}"'.format(name)
)
@run_only_on('sat')
@tier1
def test_negative_create_with_too_long_audit(self):
"""Create a new template with 256 characters in audit comments
@Feature: Template - Negative Create
@Assert: Template is not created
"""
with Session(self.browser) as session:
make_templates(
session,
name=gen_string('alpha', 16),
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
audit_comment=gen_string('alpha', 256),
template_type='PXELinux',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['haserror']))
@run_only_on('sat')
@tier1
def test_positive_create_with_snippet_type(self):
"""Create new template of type snippet
@Feature: Template - Positive Create
@Assert: New provisioning template of type 'snippet' should be created
successfully
"""
with Session(self.browser) as session:
for name in generate_strings_list(length=8):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=SNIPPET_DATA_FILE,
custom_really=True,
snippet=True,
)
self.assertIsNotNone(self.template.search(name))
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Delete an existing template
@Feature: Template - Positive Delete
@Assert: Template is deleted successfully
"""
with Session(self.browser) as session:
for template_name in generate_strings_list(length=8):
with self.subTest(template_name):
entities.ConfigTemplate(
name=template_name,
organization=[self.organization],
).create()
session.nav.go_to_select_org(self.organization.name)
self.template.delete(template_name)
@run_only_on('sat')
@tier1
def test_positive_update_name_and_type(self):
"""Update template name and template type
@Feature: Template - Positive Update
@Assert: The template name and type should be updated successfully
"""
name = gen_string('alpha')
new_name = gen_string('alpha')
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.update(name, False, new_name, None, 'PXELinux')
self.assertIsNotNone(self.template.search(new_name))
@run_only_on('sat')
@tier1
def test_positive_update_os(self):
"""Creates new template, along with two OS's and associate list
of OS's with created template
@Feature: Template - Positive Update
@Assert: The template should be updated with newly created OS's
successfully
"""
name = gen_string('alpha')
new_name = gen_string('alpha')
os_list = [
entities.OperatingSystem().create().name for _ in range(2)
]
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.update(name, False, new_name, new_os_list=os_list)
self.assertIsNotNone(self.template.search(new_name))
@run_only_on('sat')
@tier2
def test_positive_clone(self):
"""Assure ability to clone a provisioning template
@Feature: Template - Clone
@Steps:
1. Go to Provisioning template UI
2. Choose a template and attempt to clone it
@Assert: The template is cloned
"""
name = gen_string('alpha')
clone_name = gen_string('alpha')
os_list = [
entities.OperatingSystem().create().name for _ in range(2)
]
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.clone(
name,
custom_really=False,
clone_name=clone_name,
os_list=os_list,
)
self.assertIsNotNone(self.template.search(clone_name))
| gpl-3.0 | -5,909,588,654,422,227,000 | 33.77551 | 79 | 0.550176 | false |
ClearCorp-dev/odoo | addons/project/company.py | 381 | 1529 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'project_time_mode_id': fields.many2one('product.uom', 'Project Time Unit',
help='This will set the unit of measure used in projects and tasks.\n' \
"If you use the timesheet linked to projects (project_timesheet module), don't " \
"forget to setup the right unit of measure in your employees.",
),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,455,920,305,487,872,000 | 41.472222 | 84 | 0.631131 | false |
JohnOrlando/gnuradio-bitshark | gr-atsc/src/python/atsc_utils.py | 16 | 2354 | #
# Copyright 2006 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import random
import sys
MPEG_SYNC_BYTE = 0x47
def make_fake_transport_stream_packet(npkts):
"""
Return a sequence of 8-bit ints that represents an MPEG Transport Stream packet.
@param npkts: how many 188-byte packets to return
FYI, each ATSC Data Frame contains two Data Fields, each of which contains
312 data segments. Each transport stream packet maps to a data segment.
"""
r = [0] * (npkts * 188)
i = 0
for j in range(npkts):
r[i+0] = MPEG_SYNC_BYTE
r[i+1] = random.randint(0, 127) # top bit (transport error bit) clear
i = i + 2
for n in range(186):
r[i + n] = random.randint(0, 255)
i = i + 186
return r
def pad_stream(src, sizeof_total, sizeof_pad):
sizeof_valid = sizeof_total - sizeof_pad
assert sizeof_valid > 0
assert (len(src) % sizeof_valid) == 0
npkts = len(src) // sizeof_valid
dst = [0] * (npkts * sizeof_total)
for i in range(npkts):
src_s = i * sizeof_valid
dst_s = i * sizeof_total
dst[dst_s:dst_s + sizeof_valid] = src[src_s:src_s + sizeof_valid]
return dst
def depad_stream(src, sizeof_total, sizeof_pad):
sizeof_valid = sizeof_total - sizeof_pad
assert sizeof_valid > 0
assert (len(src) % sizeof_total) == 0
npkts = len(src) // sizeof_total
dst = [0] * (npkts * sizeof_valid)
for i in range(npkts):
src_s = i * sizeof_total
dst_s = i * sizeof_valid
dst[dst_s:dst_s + sizeof_valid] = src[src_s:src_s + sizeof_valid]
return dst
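# Round-trip sketch for the helpers above (hypothetical 2-byte payloads padded to 4 bytes):
#
#   >>> pad_stream([1, 2, 3, 4], sizeof_total=4, sizeof_pad=2)
#   [1, 2, 0, 0, 3, 4, 0, 0]
#   >>> depad_stream([1, 2, 0, 0, 3, 4, 0, 0], sizeof_total=4, sizeof_pad=2)
#   [1, 2, 3, 4]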
| gpl-3.0 | -5,531,725,058,651,774,000 | 30.810811 | 84 | 0.650807 | false |
saltstar/spark | python/pyspark/sql/streaming.py | 1 | 40553 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
if sys.version >= '3':
intlike = int
basestring = unicode = str
else:
intlike = (int, long)
from abc import ABCMeta, abstractmethod
from pyspark import since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.readwriter import OptionUtils, to_str
from pyspark.sql.types import *
from pyspark.sql.utils import StreamingQueryException
__all__ = ["StreamingQuery", "StreamingQueryManager", "DataStreamReader", "DataStreamWriter"]
class StreamingQuery(object):
"""
A handle to a query that is executing continuously in the background as new data arrives.
All these methods are thread-safe.
.. note:: Evolving
.. versionadded:: 2.0
"""
def __init__(self, jsq):
self._jsq = jsq
@property
@since(2.0)
def id(self):
"""Returns the unique id of this query that persists across restarts from checkpoint data.
That is, this id is generated when a query is started for the first time, and
will be the same every time it is restarted from checkpoint data.
There can only be one query with the same id active in a Spark cluster.
Also see, `runId`.
"""
return self._jsq.id().toString()
@property
@since(2.1)
def runId(self):
"""Returns the unique id of this query that does not persist across restarts. That is, every
query that is started (or restarted from checkpoint) will have a different runId.
"""
return self._jsq.runId().toString()
@property
@since(2.0)
def name(self):
"""Returns the user-specified name of the query, or null if not specified.
This name can be specified in the `org.apache.spark.sql.streaming.DataStreamWriter`
as `dataframe.writeStream.queryName("query").start()`.
This name, if set, must be unique across all active queries.
"""
return self._jsq.name()
@property
@since(2.0)
def isActive(self):
"""Whether this streaming query is currently active or not.
"""
return self._jsq.isActive()
@since(2.0)
def awaitTermination(self, timeout=None):
"""Waits for the termination of `this` query, either by :func:`query.stop()` or by an
exception. If the query has terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If the query has terminated, then all subsequent calls to this method will either return
immediately (if the query was terminated by :func:`stop()`), or throw the exception
immediately (if the query has terminated with exception).
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsq.awaitTermination(int(timeout * 1000))
else:
return self._jsq.awaitTermination()
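    # Typical usage sketch (illustrative; `df` and the sink choice are assumptions,
    # not part of this module):
    #
    #   query = df.writeStream.format("console").start()
    #   try:
    #       query.awaitTermination()   # blocks until stop() or a failure
    #   finally:
    #       query.stop()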
@property
@since(2.1)
def status(self):
"""
Returns the current status of the query.
"""
return json.loads(self._jsq.status().json())
@property
@since(2.1)
def recentProgress(self):
"""Returns an array of the most recent [[StreamingQueryProgress]] updates for this query.
The number of progress updates retained for each stream is configured by Spark session
configuration `spark.sql.streaming.numRecentProgressUpdates`.
"""
return [json.loads(p.json()) for p in self._jsq.recentProgress()]
@property
@since(2.1)
def lastProgress(self):
"""
Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or
None if there were no progress updates
:return: a map
"""
lastProgress = self._jsq.lastProgress()
if lastProgress:
return json.loads(lastProgress.json())
else:
return None
@since(2.0)
def processAllAvailable(self):
"""Blocks until all available data in the source has been processed and committed to the
sink. This method is intended for testing.
.. note:: In the case of continually arriving data, this method may block forever.
            Additionally, this method is only guaranteed to block until data that has been
            synchronously appended to a stream source prior to invocation has been processed
            (i.e. `getOffset` must immediately reflect the addition).
"""
return self._jsq.processAllAvailable()
@since(2.0)
def stop(self):
"""Stop this streaming query.
"""
self._jsq.stop()
@since(2.1)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> sq = sdf.writeStream.format('memory').queryName('query_explain').start()
>>> sq.processAllAvailable() # Wait a bit to generate the runtime plans.
>>> sq.explain()
== Physical Plan ==
...
>>> sq.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> sq.stop()
"""
# Cannot call `_jsq.explain(...)` because it will print in the JVM process.
# We should print it in the Python process.
print(self._jsq.explainInternal(extended))
@since(2.1)
def exception(self):
"""
:return: the StreamingQueryException if the query was terminated by an exception, or None.
"""
if self._jsq.exception().isDefined():
je = self._jsq.exception().get()
msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info
stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace()))
return StreamingQueryException(msg, stackTrace)
else:
return None
class StreamingQueryManager(object):
"""A class to manage all the :class:`StreamingQuery` StreamingQueries active.
.. note:: Evolving
.. versionadded:: 2.0
"""
def __init__(self, jsqm):
self._jsqm = jsqm
@property
@ignore_unicode_prefix
@since(2.0)
def active(self):
"""Returns a list of active queries associated with this SQLContext
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sqm = spark.streams
>>> # get the list of active streaming queries
>>> [q.name for q in sqm.active]
[u'this_query']
>>> sq.stop()
"""
return [StreamingQuery(jsq) for jsq in self._jsqm.active()]
@ignore_unicode_prefix
@since(2.0)
def get(self, id):
"""Returns an active query from this SQLContext or throws exception if an active query
        with this id doesn't exist.
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.name
u'this_query'
>>> sq = spark.streams.get(sq.id)
>>> sq.isActive
True
>>> sq = sqlContext.streams.get(sq.id)
>>> sq.isActive
True
>>> sq.stop()
"""
return StreamingQuery(self._jsqm.get(id))
@since(2.0)
def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has terminated or not within the
`timeout` seconds.
If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will
either return immediately (if the query was terminated by :func:`query.stop()`),
or throw the exception immediately (if the query was terminated with exception). Use
:func:`resetTerminated()` to clear past terminations and wait for new terminations.
In the case where multiple queries have terminated since :func:`resetTermination()`
was called, if any query has terminated with exception, then :func:`awaitAnyTermination()`
will throw any of the exception. For correctly documenting exceptions across multiple
queries, users need to stop all of them after any of them terminates with exception, and
then check the `query.exception()` for each query.
throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
"""
if timeout is not None:
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
return self._jsqm.awaitAnyTermination(int(timeout * 1000))
else:
return self._jsqm.awaitAnyTermination()
@since(2.0)
def resetTerminated(self):
"""Forget about past terminated queries so that :func:`awaitAnyTermination()` can be used
again to wait for new terminations.
>>> spark.streams.resetTerminated()
"""
self._jsqm.resetTerminated()
class DataStreamReader(OptionUtils):
"""
Interface used to load a streaming :class:`DataFrame` from external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`spark.readStream`
to access this.
.. note:: Evolving.
.. versionadded:: 2.0
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.readStream()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(2.0)
def format(self, source):
"""Specifies the input data source format.
.. note:: Evolving.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> s = spark.readStream.format("text")
"""
self._jreader = self._jreader.format(source)
return self
@since(2.0)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
.. note:: Evolving.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.readStream.schema(sdf_schema)
>>> s = spark.readStream.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
@since(2.0)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
>>> s = spark.readStream.option("x", 1)
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
@since(2.0)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
>>> s = spark.readStream.options(x="1", y=2)
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
@since(2.0)
def load(self, path=None, format=None, schema=None, **options):
"""Loads a data stream from a data source and returns it as a :class`DataFrame`.
.. note:: Evolving.
:param path: optional string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param options: all other string options
>>> json_sdf = spark.readStream.format("json") \\
... .schema(sdf_schema) \\
... .load(tempfile.mkdtemp())
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if path is not None:
if type(path) != str or len(path.strip()) == 0:
raise ValueError("If the path is provided for stream, it needs to be a " +
"non-empty string. List of paths are not supported.")
return self._df(self._jreader.load(path))
else:
return self._df(self._jreader.load())
@since(2.0)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None, allowUnquotedControlChars=None):
"""
Loads a JSON file stream and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
.. note:: Evolving.
:param path: string represents path to the JSON dataset,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
``columnNameOfCorruptRecord``. To keep corrupt records, an user can set \
a string type field named ``columnNameOfCorruptRecord`` in an user-defined \
schema. If a schema does not have the field, it drops corrupt records during \
parsing. When inferring a schema, it implicitly adds a \
``columnNameOfCorruptRecord`` field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
:param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
characters (ASCII characters with value less than 32,
including tab and line feed characters) or not.
>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars)
if isinstance(path, basestring):
return self._df(self._jreader.json(path))
else:
raise TypeError("path can be only a single string")
@since(2.3)
def orc(self, path):
"""Loads a ORC file stream, returning the result as a :class:`DataFrame`.
.. note:: Evolving.
>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.orc(path))
else:
raise TypeError("path can be only a single string")
@since(2.0)
def parquet(self, path):
"""Loads a Parquet file stream, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all \
Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
The default value is specified in ``spark.sql.parquet.mergeSchema``.
.. note:: Evolving.
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.parquet(path))
else:
raise TypeError("path can be only a single string")
@ignore_unicode_prefix
@since(2.0)
def text(self, path):
"""
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
        string column named "value", followed by partitioned columns if there
are any.
Each line in the text file is a new row in the resulting DataFrame.
.. note:: Evolving.
        :param path: string, or list of strings, for input path(s).
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.text(path))
else:
raise TypeError("path can be only a single string")
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None):
"""Loads a CSV file stream and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
        ``inferSchema`` is enabled. To avoid going through the entire data once, disable the
        ``inferSchema`` option or specify the schema explicitly using ``schema``.
.. note:: Evolving.
:param path: string, or list of strings, for input path(s).
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param sep: sets the single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
:param quote: sets the single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets the single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets the single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
:param negativeInf: sets the string representation of a negative infinity value. If None
is set, it uses the default value, ``Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
                ``columnNameOfCorruptRecord``. To keep corrupt records, a user can set \
                a string type field named ``columnNameOfCorruptRecord`` in a \
                user-defined schema. If a schema does not have the field, it drops corrupt \
                records during parsing. When the length of parsed CSV tokens is shorter than \
                the expected length of the schema, it sets `null` for extra fields.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param multiLine: parse one record, which may span multiple lines. If None is
set, it uses the default value, ``false``.
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
>>> csv_sdf.isStreaming
True
>>> csv_sdf.schema == sdf_schema
True
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine)
if isinstance(path, basestring):
return self._df(self._jreader.csv(path))
else:
raise TypeError("path can be only a single string")
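# --- Illustrative usage sketch (not part of the original module) -----------
# Combining the reader options documented above: an explicit schema avoids
# the extra pass implied by ``inferSchema`` and DROPMALFORMED silently drops
# corrupt rows. The input directory and column names are assumptions made
# only for this example.
def _example_csv_stream(spark):
    from pyspark.sql.types import StructType, StructField, StringType
    schema = StructType([StructField("data", StringType(), True)])
    return spark.readStream.csv("/tmp/streaming-input", schema=schema,
                                mode="DROPMALFORMED", header=True)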
class DataStreamWriter(object):
"""
Interface used to write a streaming :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.writeStream`
to access this.
.. note:: Evolving.
.. versionadded:: 2.0
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.writeStream()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(2.0)
def outputMode(self, outputMode):
"""Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
Options include:
        * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
           the sink
        * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink
           every time there are some updates
        * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be
           written to the sink every time there are some updates. If the query doesn't contain
           aggregations, it will be equivalent to `append` mode.
.. note:: Evolving.
>>> writer = sdf.writeStream.outputMode('append')
"""
if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
self._jwrite = self._jwrite.outputMode(outputMode)
return self
@since(2.0)
def format(self, source):
"""Specifies the underlying output data source.
.. note:: Evolving.
:param source: string, name of the data source, which for now can be 'parquet'.
>>> writer = sdf.writeStream.format('json')
"""
self._jwrite = self._jwrite.format(source)
return self
@since(2.0)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(2.0)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
.. note:: Evolving.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(2.0)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
.. note:: Evolving.
:param cols: name of columns
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.0)
def queryName(self, queryName):
"""Specifies the name of the :class:`StreamingQuery` that can be started with
:func:`start`. This name must be unique among all the currently active queries
in the associated SparkSession.
.. note:: Evolving.
:param queryName: unique name for the query
>>> writer = sdf.writeStream.queryName('streaming_query')
"""
if not queryName or type(queryName) != str or len(queryName.strip()) == 0:
raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName)
self._jwrite = self._jwrite.queryName(queryName)
return self
@keyword_only
@since(2.0)
def trigger(self, processingTime=None, once=None):
"""Set the trigger for the stream query. If this is not set it will run the query as fast
as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.
.. note:: Evolving.
:param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
>>> # trigger the query for execution every 5 seconds
>>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
        >>> # trigger the query for just one batch of data
>>> writer = sdf.writeStream.trigger(once=True)
"""
jTrigger = None
if processingTime is not None:
if once is not None:
raise ValueError('Multiple triggers not allowed.')
if type(processingTime) != str or len(processingTime.strip()) == 0:
raise ValueError('Value for processingTime must be a non empty string. Got: %s' %
processingTime)
interval = processingTime.strip()
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime(
interval)
elif once is not None:
if once is not True:
raise ValueError('Value for once must be True. Got: %s' % once)
jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once()
else:
raise ValueError('No trigger provided')
self._jwrite = self._jwrite.trigger(jTrigger)
return self
@ignore_unicode_prefix
@since(2.0)
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
.. note:: Evolving.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a
streaming sink.
        * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to the
           sink
        * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink
           every time there are some updates
        * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be
           written to the sink every time there are some updates. If the query doesn't contain
           aggregations, it will be equivalent to `append` mode.
:param partitionBy: names of partitioning columns
:param queryName: unique name for the query
:param options: All other string options. You may want to provide a `checkpointLocation`
                        for most streams; however, it is not required for a `memory` stream.
>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.isActive
True
>>> sq.name
u'this_query'
>>> sq.stop()
>>> sq.isActive
False
>>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start(
... queryName='that_query', outputMode="append", format='memory')
>>> sq.name
u'that_query'
>>> sq.isActive
True
>>> sq.stop()
"""
self.options(**options)
if outputMode is not None:
self.outputMode(outputMode)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if queryName is not None:
self.queryName(queryName)
if path is None:
return self._sq(self._jwrite.start())
else:
return self._sq(self._jwrite.start(path))
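# --- Illustrative usage sketch (not part of the original module) -----------
# Chaining the writer pieces documented above: output mode, a processing-time
# trigger, a checkpoint location and a file sink. The paths and query name
# are assumptions made only for this example.
def _example_start_query(sdf):
    return (sdf.writeStream
            .outputMode("append")
            .trigger(processingTime="10 seconds")
            .option("checkpointLocation", "/tmp/checkpoints/example")
            .queryName("example_query")
            .format("parquet")
            .start("/tmp/streaming-output"))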
def _test():
import doctest
import os
import tempfile
from pyspark.sql import Row, SparkSession, SQLContext
import pyspark.sql.streaming
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.streaming.__dict__.copy()
try:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['spark'] = spark
globs['sqlContext'] = SQLContext.getOrCreate(spark.sparkContext)
globs['sdf'] = \
spark.readStream.format('text').load('python/test_support/sql/streaming')
globs['sdf_schema'] = StructType([StructField("data", StringType(), False)])
globs['df'] = \
globs['spark'].readStream.format('text').load('python/test_support/sql/streaming')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.streaming, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['spark'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -8,217,027,726,388,523,000 | 43.514819 | 100 | 0.613937 | false |
demon-ru/iml-crm | addons/crm/report/crm_phonecall_report.py | 12 | 3947 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv
AVAILABLE_STATES = [
('draft', 'Draft'),
('open', 'Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending', 'Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and section """
_name = "crm.phonecall.report"
_description = "Phone calls by user and section"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True),
'state': fields.selection(AVAILABLE_STATES, 'Status', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.date('Opening Date', readonly=True, select=True),
'date_closed': fields.date('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Section
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
date(c.date_open) as opening_date,
date(c.date_closed) as date_closed,
c.state,
c.user_id,
c.section_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
c.create_date as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
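# Illustrative sketch (not part of the original module): once init() has
# created the view, it can be queried like any table, for example to get the
# average closing delay per salesperson. Column names come from the view
# definition above; the grouping chosen here is only an example.
EXAMPLE_REPORT_SQL = """
    select user_id, count(*) as calls, avg(delay_close) as avg_days_to_close
    from crm_phonecall_report
    group by user_id
"""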
| agpl-3.0 | -2,845,947,178,146,741,000 | 43.348315 | 145 | 0.563466 | false |
wakatime/wakatime-unity | Editor/WakaTime/client/wakatime/packages/pygments_py2/pygments/lexers/igor.py | 72 | 16870 | # -*- coding: utf-8 -*-
"""
pygments.lexers.igor
~~~~~~~~~~~~~~~~~~~~
Lexers for Igor Pro.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Keyword, Name, String
__all__ = ['IgorLexer']
class IgorLexer(RegexLexer):
"""
Pygments Lexer for Igor Pro procedure files (.ipf).
See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
.. versionadded:: 2.0
"""
name = 'Igor'
aliases = ['igor', 'igorpro']
filenames = ['*.ipf']
mimetypes = ['text/ipf']
flags = re.IGNORECASE | re.MULTILINE
flowControl = (
'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
'break', 'continue', 'return',
)
types = (
'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
'STRUCT', 'dfref'
)
keywords = (
'override', 'ThreadSafe', 'static', 'FuncFit', 'Proc', 'Picture',
'Prompt', 'DoPrompt', 'macro', 'window', 'graph', 'function', 'end',
'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu',
)
operations = (
'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio',
'AddMovieFrame', 'APMath', 'Append', 'AppendImage',
'AppendLayoutObject', 'AppendMatrixContour', 'AppendText',
'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour',
'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall',
'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox',
'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc',
'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar',
'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile',
'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross',
'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor',
'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions',
'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide',
'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints',
'Differentiate', 'dir', 'Display', 'DisplayHelpTopic',
'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow',
'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine',
'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText',
'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder',
'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText',
'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp',
'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR',
'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly',
'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf',
'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload',
'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo',
'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow',
'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox',
'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools',
'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles',
'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection',
'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask',
'ImageHistModification', 'ImageHistogram', 'ImageInterpolate',
'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration',
'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave',
'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold',
'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort',
'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath',
'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder',
'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings',
'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout',
'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData',
'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess',
'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime',
'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV', 'MatrixFilter',
'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve',
'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD',
'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve',
'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText',
'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList',
'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout',
'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder',
'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain',
'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage',
'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath',
'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open',
'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo',
'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction',
'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs',
'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable',
'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit',
'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour',
'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage',
'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder',
'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages',
'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample',
'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook',
'SavePackagePreferences', 'SavePICT', 'SaveTableCopy',
'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern',
'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer',
'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode',
'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed',
'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus',
'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth',
'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet',
'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart',
'SphericalInterpolate', 'SphericalTriangulate', 'SplitString',
'sprintf', 'sscanf', 'Stack', 'StackWindows',
'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest',
'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
'StatsCircularCorrelationTest', 'StatsCircularMeans',
'StatsCircularMoments', 'StatsCircularTwoSampleTest',
'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest',
'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest',
'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
'StatsLinearRegression', 'StatsMultiCorrelationTest',
'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles',
'StatsRankCorrelationTest', 'StatsResample', 'StatsSample',
'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest',
'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest',
'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest',
'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String',
'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile',
'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid',
'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv',
'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform',
'WindowFunction',
)
functions = (
'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog',
'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi',
'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch',
'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs',
'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev',
'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos',
'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi',
'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual',
'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian',
'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave',
'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma',
'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf',
'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata',
'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor',
'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize',
'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise',
'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1',
'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion',
'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D',
'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre',
'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln',
'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint',
'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max',
'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey',
'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect',
'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x',
'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar',
'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec',
'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ',
'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD',
'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF',
'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF',
'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF',
'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF',
'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF',
'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF',
'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF',
'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF',
'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF',
'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF',
'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF',
'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF',
'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF',
'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF',
'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF',
'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF',
'StatsMaxwellPDF', 'StatsMedian', 'StatsMooreCDF', 'StatsNBinomialCDF',
'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF',
'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF',
'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute',
'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF',
'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF',
'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF',
'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise',
'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF',
'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch',
'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists',
'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease',
'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks',
'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists',
'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem',
'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR',
)
functions += (
'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo',
'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName',
'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo',
'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date',
'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo',
'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont',
'GetDimLabel', 'GetErrMessage', 'GetFormula',
'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR',
'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash',
'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile',
'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList',
'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str',
'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo',
'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey',
'RemoveEnding', 'RemoveFromList', 'RemoveListItem',
'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey',
'Secs2Date', 'Secs2Time', 'SelectString', 'SortList',
'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath',
'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault',
'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel',
'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr',
'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits',
'WinList', 'WinName', 'WinRecreation', 'XWaveName',
'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef',
'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef',
'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR',
'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR',
)
tokens = {
'root': [
(r'//.*$', Comment.Single),
(r'"([^"\\]|\\.)*"', String),
# Flow Control.
(words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword),
# Types.
(words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
# Keywords.
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
# Built-in operations.
(words(operations, prefix=r'\b', suffix=r'\b'), Name.Class),
# Built-in functions.
(words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
# Compiler directives.
(r'^#(include|pragma|define|ifdef|ifndef|endif)',
Name.Decorator),
(r'[^a-z"/]+$', Text),
(r'.', Text),
],
}
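# --- Illustrative usage sketch (not part of the original module) -----------
# Rendering a small Igor Pro snippet to HTML with this lexer; the snippet and
# the choice of HtmlFormatter are assumptions made only for this example.
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    code = u'Function demo()\n    Variable v = 1.5\n    Print v\nEnd'
    return highlight(code, IgorLexer(), HtmlFormatter())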
| cc0-1.0 | -4,939,505,347,875,956,000 | 59.46595 | 80 | 0.635744 | false |
multikatt/CouchPotatoServer | libs/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
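# --- Illustrative usage sketch (not part of the original module) -----------
# Building the wrapper from a plain dict and doing a longest-prefix lookup.
# The sample keys and values are assumptions made only for this example.
def _example_trie():
    trie = Trie({"foo": 1, "foobar": 2, "fob": 3})
    assert "foo" in trie and len(trie) == 3
    return trie.longest_prefix("foobarbaz")  # -> "foobar"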
| gpl-3.0 | -4,769,590,396,546,399,000 | 25.5 | 66 | 0.600343 | false |
Jorge-Rodriguez/ansible | lib/ansible/modules/system/osx_defaults.py | 9 | 13852 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, GeekChimp - Franck Nijhof <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: osx_defaults
author: Franck Nijhof (@frenck)
short_description: osx_defaults allows users to read, write, and delete macOS user defaults from Ansible
description:
- osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
macOS applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications aren't running (such as default font for new
documents, or the position of an Info panel).
version_added: "2.0"
options:
domain:
description:
- The domain is a domain name of the form com.companyname.appname.
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply. The special value "currentHost" corresponds to the
"-currentHost" switch of the defaults commandline tool.
version_added: "2.1"
key:
description:
- The key of the user preference
required: true
type:
description:
- The type of value to write.
default: string
choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
type: bool
default: 'no'
value:
description:
- The value to write. Only required when state = present.
state:
description:
- The state of the user defaults
default: present
choices: [ "present", "absent" ]
notes:
    - Apple Mac caches defaults. You may need to log out and log back in to apply the changes.
'''
EXAMPLES = '''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: str
value: Centimeters
state: present
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: str
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
import datetime
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type, text_type
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
pass
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
""" Class to manage Mac OS user defaults """
# init ---------------------------------------------------------------- {{{
""" Initialize this module. Finds 'defaults' executable and preps the parameters """
def __init__(self, **kwargs):
# Initial var for storing current defaults value
self.current_value = None
# Just set all given parameters
for key, val in kwargs.items():
setattr(self, key, val)
# Try to find the defaults executable
self.executable = self.module.get_bin_path(
'defaults',
required=False,
opt_dirs=self.path.split(':'),
)
if not self.executable:
raise OSXDefaultsException("Unable to locate defaults executable.")
# When state is present, we require a parameter
if self.state == "present" and self.value is None:
raise OSXDefaultsException("Missing value parameter")
# Ensure the value is the correct type
self.value = self._convert_type(self.type, self.value)
# /init --------------------------------------------------------------- }}}
# tools --------------------------------------------------------------- {{{
""" Converts value to given type """
def _convert_type(self, type, value):
if type == "string":
return str(value)
elif type in ["bool", "boolean"]:
if isinstance(value, (binary_type, text_type)):
value = value.lower()
if value in [True, 1, "true", "1", "yes"]:
return True
elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
elif type == "date":
try:
return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
)
elif type in ["int", "integer"]:
if not str(value).isdigit():
raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
return int(value)
elif type == "float":
try:
value = float(value)
except ValueError:
raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
return value
elif type == "array":
if not isinstance(value, list):
raise OSXDefaultsException("Invalid value. Expected value to be an array")
return value
raise OSXDefaultsException('Type is not supported: {0}'.format(type))
""" Returns a normalized list of commandline arguments based on the "host" attribute """
def _host_args(self):
if self.host is None:
return []
elif self.host == 'currentHost':
return ['-currentHost']
else:
return ['-host', self.host]
""" Returns a list containing the "defaults" executable and any common base arguments """
def _base_command(self):
return [self.executable] + self._host_args()
""" Converts array output from defaults to an list """
@staticmethod
def _convert_defaults_str_to_list(value):
# Split output of defaults. Every line contains a value
value = value.splitlines()
# Remove first and last item, those are not actual values
value.pop(0)
value.pop(-1)
# Remove extra spaces and comma (,) at the end of values
value = [re.sub(',$', '', x.strip(' ')) for x in value]
return value
# /tools -------------------------------------------------------------- }}}
# commands ------------------------------------------------------------ {{{
""" Reads value of this domain & key from defaults """
def read(self):
# First try to find out the type
rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
# If RC is 1, the key does not exist
if rc == 1:
return None
        # If the RC is not 0, then something terrible happened! Ooooh nooo!
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)
# Ok, lets parse the type from output
type = out.strip().replace('Type is ', '')
# Now get the current value
rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
# Strip output
out = out.strip()
        # A non-zero RC at this point is kinda strange...
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)
# Convert string to list when type is array
if type == "array":
out = self._convert_defaults_str_to_list(out)
# Store the current_value
self.current_value = self._convert_type(type, out)
""" Writes value to this domain & key to defaults """
def write(self):
# We need to convert some values so the defaults commandline understands it
if isinstance(self.value, bool):
if self.value:
value = "TRUE"
else:
value = "FALSE"
elif isinstance(self.value, (int, float)):
value = str(self.value)
elif self.array_add and self.current_value is not None:
value = list(set(self.value) - set(self.current_value))
elif isinstance(self.value, datetime.datetime):
value = self.value.strftime('%Y-%m-%d %H:%M:%S')
else:
value = self.value
# When the type is array and array_add is enabled, morph the type :)
if self.type == "array" and self.array_add:
self.type = "array-add"
# All values should be a list, for easy passing it to the command
if not isinstance(value, list):
value = [value]
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
""" Deletes defaults key from domain """
def delete(self):
rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
# /commands ----------------------------------------------------------- }}}
# run ----------------------------------------------------------------- {{{
""" Does the magic! :) """
def run(self):
# Get the current value from defaults
self.read()
# Handle absent state
if self.state == "absent":
if self.current_value is None:
return False
if self.module.check_mode:
return True
self.delete()
return True
# There is a type mismatch! Given type does not match the type in defaults
value_type = type(self.value)
if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
if self.type == "array" and self.current_value is not None and not self.array_add and \
set(self.current_value) == set(self.value):
return False
elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
return False
elif self.current_value == self.value:
return False
if self.module.check_mode:
return True
# Change/Create/Set given key/value for domain in defaults
self.write()
return True
# /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
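# --- Illustrative sketch (not part of the original module) ----------------
# Driving the helper class directly, mirroring the Safari example from the
# EXAMPLES block above. ``module`` is assumed to be a real AnsibleModule on a
# macOS host; the domain/key/value shown are only example data.
def _example_safari_debug_menu(module):
    defaults = OSXDefaults(module=module, domain="com.apple.Safari",
                           host=None, key="IncludeInternalDebugMenu",
                           type="bool", array_add=False, value=True,
                           state="present", path="/usr/bin:/usr/local/bin")
    return defaults.run()  # True if the default was changed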
# main -------------------------------------------------------------------- {{{
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(
default="NSGlobalDomain",
required=False,
),
host=dict(
default=None,
required=False,
),
key=dict(
default=None,
),
type=dict(
default="string",
required=False,
choices=[
"array",
"bool",
"boolean",
"date",
"float",
"int",
"integer",
"string",
],
),
array_add=dict(
default=False,
required=False,
type='bool',
),
value=dict(
default=None,
required=False,
type='raw'
),
state=dict(
default="present",
required=False,
choices=[
"absent", "present"
],
),
path=dict(
default="/usr/bin:/usr/local/bin",
required=False,
)
),
supports_check_mode=True,
)
domain = module.params['domain']
host = module.params['host']
key = module.params['key']
type = module.params['type']
array_add = module.params['array_add']
value = module.params['value']
state = module.params['state']
path = module.params['path']
try:
defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path)
changed = defaults.run()
module.exit_json(changed=changed)
except OSXDefaultsException as e:
module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
if __name__ == '__main__':
main()
| gpl-3.0 | 4,307,342,835,637,381,600 | 32.059666 | 150 | 0.534724 | false |
jaredly/codetalker | tests/tokenize/ctokens.py | 1 | 2966 | #!/usr/bin/env python
from util import just_tokenize, make_tests, make_fails, TSTRING, STRING, SSTRING, ID, WHITE, NUMBER, INT, HEX, CCOMMENT, CMCOMMENT, PYCOMMENT, NEWLINE, ANY
def make_single(tok, *tests):
fn = just_tokenize(tok, WHITE)
return make_tests(globals(), tok.__name__, fn, tests)
def fail_single(tok, *tests):
fn = just_tokenize(tok)
return make_fails(globals(), tok.__name__, fn, tests)
# string
make_single(STRING,
('', 0),
('"one"', 1),
('"lo' + 'o'*1000 + 'ng"', 1),
('"many"'*20, 20))
fail_single(STRING,
'"',
'"hello',
'"one""and',
'"lo' + 'o'*1000)
# sstring
make_single(SSTRING,
('', 0),
("'one'", 1),
('\'lo' + 'o'*1000 + 'ng\'', 1),
('\'many\''*20, 20))
fail_single(SSTRING,
"'",
"'one",
"'lo"+'o'*1000,
"'many'"*20+"'")
# tstring
make_single(TSTRING,
('', 0),
('""""""', 1),
('"""one line"""', 1),
('"""two\nlines"""', 1),
('"""lots'+'\n'*100+'of lines"""', 1),
('"""many"""'*20, 20),
("''''''", 1),
("'''one line'''", 1),
("'''two\nlines'''", 1),
("'''lots"+'\n'*100+"of lines'''", 1),
("'''many'''"*20, 20))
fail_single(TSTRING,
'"',
'"""""',
'"""',
'"""start',
'"""not full"',
'"""partial""')
# ID
make_single(ID,
('', 0),
('o', 1),
('one', 1),
('lo'+'o'*1000+'ng', 1),
('numb3rs', 1),
('ev3ry_thing', 1))
fail_single(ID,
'3',
'3tostart',
'$other',
'in-the-middle')
# NUMBER
make_single(NUMBER,
('', 0),
('24', 1),
('1 2', 3),
('1.2', 1),
('.3', 1),
('1.23'+'4'*100, 1),
('123'+'4'*100 + '1.20', 1),
('1.23e10', 1),
('1.23E10', 1),
('1.23e+10', 1),
('1.23E+10', 1),
('.1e-10', 1),
('.1E-10', 1))
fail_single(NUMBER,
'.1e',
'.2e.10')
# INT
make_single(INT,
('123', 1),
('', 0),
('100'+'0'*1000+'6543', 1))
# HEX
make_single(HEX,
('0xdead', 1),
('0x1234', 1),
('0xDEad0142', 1),
('0XDe23', 1))
fail_single(HEX,
'1x23',
'0xread')
# CCOMMENT
make_single(CCOMMENT,
('', 0),
('// hello!', 1),
('// one\n', 1),
('// one\n// two', 2))
# CMCOMMENT
make_single(CMCOMMENT,
('', 0),
('/**/', 1),
('/** //*/', 1),
('/*/*/', 1),
('/* // *//**/', 2),
('/** multi\n// line**/', 1))
fail_single(CMCOMMENT,
'/*/',
'/',
'/*',
'/** stuff\n')
# PYCOMMENT
make_single(PYCOMMENT,
('', 0),
('# stuff', 1),
('# nline\n', 1),
('# more\n# stuff', 2))
# ANY
make_single(ANY,
('', 0),
('ask@#$\n', 7))
# vim: et sw=4 sts=4
| mit | 7,849,862,772,122,954,000 | 17.772152 | 155 | 0.37087 | false |
benrudolph/commcare-hq | corehq/apps/telerivet/tasks.py | 2 | 1692 | from corehq.apps.telerivet.models import TelerivetBackend, IncomingRequest
from corehq.apps.sms.api import incoming as incoming_sms
from corehq.apps.sms.util import strip_plus
from corehq.apps.ivr.api import incoming as incoming_ivr
from celery.task import task
from dimagi.utils.logging import notify_exception
from django.conf import settings
EVENT_INCOMING = "incoming_message"
MESSAGE_TYPE_SMS = "sms"
MESSAGE_TYPE_MMS = "mms"
MESSAGE_TYPE_USSD = "ussd"
MESSAGE_TYPE_CALL = "call"
CELERY_QUEUE = ("sms_queue" if settings.SMS_QUEUE_ENABLED else
settings.CELERY_MAIN_QUEUE)
@task(queue=CELERY_QUEUE, ignore_result=True)
def process_incoming_message(*args, **kwargs):
try:
from corehq.apps.telerivet.views import TELERIVET_INBOUND_FIELD_MAP
fields = {a: kwargs[a] for (a, b) in TELERIVET_INBOUND_FIELD_MAP}
log = IncomingRequest(**fields)
log.save()
    except Exception:
        notify_exception(None, "Could not save Telerivet log entry")
backend = TelerivetBackend.by_webhook_secret(kwargs["secret"])
if backend is None:
# Ignore the message if the webhook secret is not recognized
return
if kwargs["from_number_e164"]:
from_number = strip_plus(kwargs["from_number_e164"])
else:
from_number = strip_plus(kwargs["from_number"])
if kwargs["event"] == EVENT_INCOMING:
if kwargs["message_type"] == MESSAGE_TYPE_SMS:
incoming_sms(from_number, kwargs["content"], TelerivetBackend.get_api_id())
elif kwargs["message_type"] == MESSAGE_TYPE_CALL:
incoming_ivr(from_number, None,
"TELERIVET-%s" % kwargs["message_id"], None)
| bsd-3-clause | 6,432,596,914,690,855,000 | 36.6 | 87 | 0.689716 | false |
tipsybear/actors-simulation | tests/test_viz.py | 1 | 1179 | # test_viz
# Visualization tests
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Dec 06 20:45:32 2015 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: test_viz.py [] [email protected] $
"""
Visualization tests
"""
##########################################################################
## Imports
##########################################################################
import unittest
import gvas.viz
from peak.util.imports import lazyModule
##########################################################################
## Visualization and Configuration Tests
##########################################################################
class VizTests(unittest.TestCase):
def test_lazyimport(self):
"""
Test that the viz module is lazily imported.
"""
self.assertEqual(type(gvas.viz.sns), type(lazyModule('seaborn')))
self.assertEqual(type(gvas.viz.plt), type(lazyModule('matplotlib.pyplot')))
self.assertEqual(type(gvas.viz.np), type(lazyModule('numpy')))
self.assertEqual(type(gvas.viz.pd), type(lazyModule('pandas')))
| mit | -3,124,656,277,638,479,400 | 29.230769 | 83 | 0.509754 | false |
Zhaoyanzhang/-myflasky | venv/lib/python2.7/site-packages/sqlalchemy/sql/util.py | 31 | 24707 | # sql/util.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from .. import exc, util
from .base import _from_objects, ColumnSet
from . import operators, visitors
from itertools import chain
from collections import deque
from .elements import BindParameter, ColumnClause, ColumnElement, \
Null, UnaryExpression, literal_column, Label, _label_reference, \
_textual_label_reference
from .selectable import ScalarSelect, Join, FromClause, FromGrouping
from .schema import Column
join_condition = util.langhelpers.public_factory(
Join._join_condition,
".sql.util.join_condition")
# names that are still being imported from the outside
from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate
from .elements import _find_columns
from .ddl import sort_tables
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(_from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def visit_binary_product(fn, expr):
"""Produce a traversal of the given expression, delivering
column comparisons to the given function.
The function is of the form::
def my_fn(binary, left, right)
For each binary expression located which has a
comparison operator, the product of "left" and
"right" will be delivered to that function,
in terms of that binary.
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
would have the traversal::
a <eq> q
a <eq> e
a <eq> f
b <eq> q
b <eq> e
b <eq> f
j <eq> r
That is, every combination of "left" and
"right" that doesn't further contain
a binary comparison is passed as pairs.
"""
stack = []
def visit(element):
if isinstance(element, ScalarSelect):
# we don't want to dig into correlated subqueries,
# those are just column elements by themselves
yield element
elif element.__visit_name__ == 'binary' and \
operators.is_comparison(element.operator):
stack.insert(0, element)
for l in visit(element.left):
for r in visit(element.right):
fn(stack[0], l, r)
stack.pop(0)
for elem in element.get_children():
visit(elem)
else:
if isinstance(element, ColumnClause):
yield element
for elem in element.get_children():
for e in visit(elem):
yield e
list(visit(expr))
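# --- Illustrative usage sketch (not part of the original module) -----------
# Collecting the column pairs delivered by the traversal described in the
# docstring above. The table and column names are assumptions made only for
# this example.
def _example_binary_product():
    from sqlalchemy.sql import and_, column, table
    t = table("t", column("a"), column("b"), column("q"))
    pairs = []
    visit_binary_product(
        lambda binary, left, right: pairs.append((left, right)),
        and_(t.c.a + t.c.b == t.c.q, t.c.b == t.c.q))
    return pairs  # [(a, q), (b, q), (b, q)]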
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections': False}, _visitors)
return tables
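# --- Illustrative usage sketch (not part of the original module) -----------
# Locating the Table objects referenced by a statement. The table and column
# names are assumptions made only for this example.
def _example_find_tables():
    from sqlalchemy import Column, Integer, MetaData, Table, select
    m = MetaData()
    t1 = Table("t1", m, Column("id", Integer))
    t2 = Table("t2", m, Column("id", Integer))
    stmt = select([t1.c.id]).where(t1.c.id == t2.c.id)
    return find_tables(stmt)  # -> the Table objects referenced by stmt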
def unwrap_order_by(clause):
"""Break up an 'order by' expression into individual column-expressions,
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
result = []
stack = deque([clause])
while stack:
t = stack.popleft()
if isinstance(t, ColumnElement) and \
(
not isinstance(t, UnaryExpression) or
not operators.is_ordering_modifier(t.modifier)
):
if isinstance(t, _label_reference):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
if t not in cols:
cols.add(t)
result.append(t)
else:
for c in t.get_children():
stack.append(c)
return result
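# --- Illustrative usage sketch (not part of the original module) -----------
# The DESC wrapper is stripped, leaving only the underlying column
# expression. The table and column names are assumptions made only for this
# example.
def _example_unwrap_order_by():
    from sqlalchemy.sql import column, desc, table
    t = table("t", column("a"))
    return unwrap_order_by(desc(t.c.a))  # -> [a]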
def unwrap_label_reference(element):
def replace(elem):
if isinstance(elem, (_label_reference, _textual_label_reference)):
return elem.element
return visitors.replacement_traverse(
element, {}, replace
)
def expand_column_list_from_order_by(collist, order_by):
"""Given the columns clause and ORDER BY of a selectable,
return a list of column expressions that can be added to the collist
corresponding to the ORDER BY, without repeating those already
in the collist.
"""
cols_already_present = set([
col.element if col._order_by_label_element is not None
else col for col in collist
])
return [
col for col in
chain(*[
unwrap_order_by(o)
for o in order_by
])
if col not in cols_already_present
]
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
for elem in surface_selectables(search):
if clause == elem: # use == here so that Annotated's compare
return True
else:
return False
def surface_selectables(clause):
stack = [clause]
while stack:
elem = stack.pop()
yield elem
if isinstance(elem, Join):
stack.extend((elem.left, elem.right))
elif isinstance(elem, FromGrouping):
stack.append(elem.element)
def surface_column_elements(clause):
"""traverse and yield only outer-exposed column elements, such as would
be addressable in the WHERE clause of a SELECT if this element were
in the columns clause."""
stack = deque([clause])
while stack:
elem = stack.popleft()
yield elem
for sub in elem.get_children():
if isinstance(sub, FromGrouping):
continue
stack.append(sub)
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
return bool(
set(surface_selectables(left)).intersection(
surface_selectables(right)
)
)
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
v.append(bind.effective_value)
visitors.traverse(clause, {}, {'bindparam': visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, util.string_types):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
class _repr_base(object):
_LIST = 0
_TUPLE = 1
_DICT = 2
__slots__ = 'max_chars',
def trunc(self, value):
rep = repr(value)
lenrep = len(rep)
if lenrep > self.max_chars:
segment_length = self.max_chars // 2
rep = (
rep[0:segment_length] +
(" ... (%d characters truncated) ... "
% (lenrep - self.max_chars)) +
rep[-segment_length:]
)
return rep
class _repr_row(_repr_base):
"""Provide a string view of a row."""
__slots__ = 'row',
def __init__(self, row, max_chars=300):
self.row = row
self.max_chars = max_chars
def __repr__(self):
trunc = self.trunc
return "(%s%s)" % (
", ".join(trunc(value) for value in self.row),
"," if len(self.row) == 1 else ""
)
class _repr_params(_repr_base):
"""Provide a string view of bound parameters.
    Truncates display to a given number of 'multi' parameter sets,
as well as long values to a given number of characters.
"""
__slots__ = 'params', 'batches',
def __init__(self, params, batches, max_chars=300):
self.params = params
self.batches = batches
self.max_chars = max_chars
def __repr__(self):
if isinstance(self.params, list):
typ = self._LIST
ismulti = self.params and isinstance(
self.params[0], (list, dict, tuple))
elif isinstance(self.params, tuple):
typ = self._TUPLE
ismulti = self.params and isinstance(
self.params[0], (list, dict, tuple))
elif isinstance(self.params, dict):
typ = self._DICT
ismulti = False
else:
return self.trunc(self.params)
if ismulti and len(self.params) > self.batches:
msg = " ... displaying %i of %i total bound parameter sets ... "
return ' '.join((
self._repr_multi(self.params[:self.batches - 2], typ)[0:-1],
msg % (self.batches, len(self.params)),
self._repr_multi(self.params[-2:], typ)[1:]
))
elif ismulti:
return self._repr_multi(self.params, typ)
else:
return self._repr_params(self.params, typ)
def _repr_multi(self, multi_params, typ):
if multi_params:
if isinstance(multi_params[0], list):
elem_type = self._LIST
elif isinstance(multi_params[0], tuple):
elem_type = self._TUPLE
elif isinstance(multi_params[0], dict):
elem_type = self._DICT
else:
assert False, \
"Unknown parameter type %s" % (type(multi_params[0]))
elements = ", ".join(
self._repr_params(params, elem_type)
for params in multi_params)
else:
elements = ""
if typ == self._LIST:
return "[%s]" % elements
else:
return "(%s)" % elements
def _repr_params(self, params, typ):
trunc = self.trunc
if typ is self._DICT:
return "{%s}" % (
", ".join(
"%r: %s" % (key, trunc(value))
for key, value in params.items()
)
)
elif typ is self._TUPLE:
return "(%s%s)" % (
", ".join(trunc(value) for value in params),
"," if len(params) == 1 else ""
)
else:
return "[%s]" % (
", ".join(trunc(value) for value in params)
)
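# Minimal usage sketch (assumed values, for illustration only):
#     _repr_params([{"x": 1}] * 100, batches=10, max_chars=300)
# reprs the first and last parameter sets with a
# " ... displaying 10 of 100 total bound parameter sets ... " marker in place
# of the omitted middle entries, and trunc() shortens any oversized value.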
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements
to IS NULL.
"""
def visit_binary(binary):
if isinstance(binary.left, BindParameter) \
and binary.left._identifying_key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, BindParameter) \
and binary.right._identifying_key in nulls:
binary.right = Null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary': visit_binary})
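# Illustrative sketch (assumed expressions): given
#     crit = and_(t.c.a == bindparam('p1'), t.c.b == bindparam('p2'))
# adapt_criterion_to_null(crit, {'p1'}) returns a cloned criterion equivalent
# to "t.a IS NULL AND t.b = :p2"; only the binds named in the ``nulls``
# collection are rewritten into IS NULL comparisons.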
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
r"""given a list of columns, return a 'reduced' set based on natural
equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two
columns will ultimately represent the same value because they are related
by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured, or columns that aren't yet present.
This function is primarily used to determine the most minimal "primary
key" from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
only_synonyms = kw.pop('only_synonyms', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedColumnError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
except exc.NoReferencedTableError:
# TODO: add specific coverage here
# to test/sql/test_selectable ReduceTest
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c) and \
(not only_synonyms or
c.name == col.name):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(
chain(*[c.proxy_set for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in reversed(columns):
if c.shares_lineage(binary.right) and \
(not only_synonyms or
c.name == binary.left.name):
omit.add(c)
break
for clause in clauses:
if clause is not None:
visitors.traverse(clause, {}, {'binary': visit_binary})
return ColumnSet(columns.difference(omit))
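# Hedged example (table/column names are assumptions): with
#     parent = Table('parent', m, Column('id', Integer, primary_key=True))
#     child = Table('child', m, Column('id', Integer, ForeignKey('parent.id'),
#                                      primary_key=True))
# reduce_columns([parent.c.id, child.c.id]) returns ColumnSet([parent.c.id]),
# because child.c.id is a foreign-key "natural equivalent" of parent.c.id.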
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def col_is(a, b):
# return a is b
return a.compare(b)
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, ColumnElement) or \
not isinstance(binary.right, ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(col_is(binary.right, binary.left) or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(col_is(binary.left, binary.right) or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, Column) and \
isinstance(binary.right, Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary': visit_binary})
return pairs
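# Rough usage sketch (assumed columns): for a join condition such as
#     cond = parent.c.id == child.c.parent_id
# criterion_as_pairs(cond, consider_as_foreign_keys={child.c.parent_id})
# returns [(parent.c.id, child.c.parent_id)], i.e. (referenced, foreign) pairs
# extracted from each equality comparison in the expression.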
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None,
include_fn=None, exclude_fn=None,
adapt_on_names=False, anonymize_labels=False):
self.__traverse_options__ = {
'stop_on': [selectable],
'anonymize_labels': anonymize_labels}
self.selectable = selectable
self.include_fn = include_fn
self.exclude_fn = exclude_fn
self.equivalents = util.column_dict(equivalents or {})
self.adapt_on_names = adapt_on_names
def _corresponding_column(self, col, require_embedded,
_seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(
col,
require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(
equiv, require_embedded=require_embedded,
_seen=_seen.union([col]))
if newcol is not None:
return newcol
if self.adapt_on_names and newcol is None:
newcol = self.selectable.c.get(col.name)
return newcol
def replace(self, col):
if isinstance(col, FromClause) and \
self.selectable.is_derived_from(col):
return self.selectable
elif not isinstance(col, ColumnElement):
return None
elif self.include_fn and not self.include_fn(col):
return None
elif self.exclude_fn and self.exclude_fn(col):
return None
else:
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Key aspects of ColumnAdapter include:
* Expressions that are adapted are stored in a persistent
.columns collection; so that an expression E adapted into
an expression E1, will return the same object E1 when adapted
a second time. This is important in particular for things like
Label objects that are anonymized, so that the ColumnAdapter can
be used to present a consistent "adapted" view of things.
* Exclusion of items from the persistent collection based on
include/exclude rules, but also independent of hash identity.
      This is because "annotated" items all have the same hash identity as their
parent.
* "wrapping" capability is added, so that the replacement of an expression
E can proceed through a series of adapters. This differs from the
visitor's "chaining" feature in that the resulting object is passed
through all replacing functions unconditionally, rather than stopping
at the first one that returns non-None.
* An adapt_required option, used by eager loading to indicate that
      we don't trust a result row column that is not translated.
This is to prevent a column from being interpreted as that
of the child row in a self-referential scenario, see
inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
"""
def __init__(self, selectable, equivalents=None,
chain_to=None, adapt_required=False,
include_fn=None, exclude_fn=None,
adapt_on_names=False,
allow_label_resolve=True,
anonymize_labels=False):
ClauseAdapter.__init__(self, selectable, equivalents,
include_fn=include_fn, exclude_fn=exclude_fn,
adapt_on_names=adapt_on_names,
anonymize_labels=anonymize_labels)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
if self.include_fn or self.exclude_fn:
self.columns = self._IncludeExcludeMapping(self, self.columns)
self.adapt_required = adapt_required
self.allow_label_resolve = allow_label_resolve
self._wrap = None
class _IncludeExcludeMapping(object):
def __init__(self, parent, columns):
self.parent = parent
self.columns = columns
def __getitem__(self, key):
if (
self.parent.include_fn and not self.parent.include_fn(key)
) or (
self.parent.exclude_fn and self.parent.exclude_fn(key)
):
if self.parent._wrap:
return self.parent._wrap.columns[key]
else:
return key
return self.columns[key]
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__.update(self.__dict__)
ac._wrap = adapter
ac.columns = util.populate_column_dict(ac._locate_col)
if ac.include_fn or ac.exclude_fn:
ac.columns = self._IncludeExcludeMapping(ac, ac.columns)
return ac
def traverse(self, obj):
return self.columns[obj]
adapt_clause = traverse
adapt_list = ClauseAdapter.copy_and_process
def _locate_col(self, col):
c = ClauseAdapter.traverse(self, col)
if self._wrap:
c2 = self._wrap._locate_col(c)
if c2 is not None:
c = c2
if self.adapt_required and c is col:
return None
c._allow_label_resolve = self.allow_label_resolve
return c
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
| mit | -3,129,984,243,681,165,000 | 31.423885 | 78 | 0.57506 | false |
mskrzypkows/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 92 | 4155 | import json, os, urllib, urlparse
def redirect(url, response):
response.add_required_headers = False
response.writer.write_status(301)
response.writer.write_header("access-control-allow-origin", "*")
response.writer.write_header("location", url)
response.writer.end_headers()
response.writer.write("")
def create_redirect_url(request, swap_scheme = False):
parsed = urlparse.urlsplit(request.url)
destination_netloc = parsed.netloc
scheme = parsed.scheme
if swap_scheme:
scheme = "http" if parsed.scheme == "https" else "https"
hostname = parsed.netloc.split(':')[0]
port = request.server.config["ports"][scheme][0]
destination_netloc = ":".join([hostname, str(port)])
# Remove "redirection" from query to avoid redirect loops.
parsed_query = dict(urlparse.parse_qsl(parsed.query))
assert "redirection" in parsed_query
del parsed_query["redirection"]
destination_url = urlparse.urlunsplit(urlparse.SplitResult(
scheme = scheme,
netloc = destination_netloc,
path = parsed.path,
query = urllib.urlencode(parsed_query),
fragment = None))
return destination_url
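# Hedged example (host/port values are assumptions): for a request to
#     https://example.test:8443/expect.py?redirection=swap-scheme-redirect&key=k
# create_redirect_url(request, swap_scheme=True) would return something like
#     http://example.test:8000/expect.py?key=k
# i.e. the scheme (and its configured port) is swapped and the "redirection"
# parameter is dropped to avoid redirect loops.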
def main(request, response):
if "redirection" in request.GET:
redirection = request.GET["redirection"]
if redirection == "no-redirect":
pass
elif redirection == "keep-scheme-redirect":
redirect(create_redirect_url(request, swap_scheme=False), response)
return
elif redirection == "swap-scheme-redirect":
redirect(create_redirect_url(request, swap_scheme=True), response)
return
else:
raise ValueError ("Invalid redirect type: %s" % redirection)
content_type = "text/plain"
response_data = ""
if "action" in request.GET:
action = request.GET["action"]
if "content_type" in request.GET:
content_type = request.GET["content_type"]
key = request.GET["key"]
stash = request.server.stash
path = request.GET.get("path", request.url.split('?'))[0]
if action == "put":
value = request.GET["value"]
stash.take(key=key, path=path)
stash.put(key=key, value=value, path=path)
response_data = json.dumps({"status": "success", "result": key})
elif action == "purge":
value = stash.take(key=key, path=path)
if content_type == "image/png":
response_data = open(os.path.join(request.doc_root,
"images",
"smiley.png")).read()
elif content_type == "audio/mpeg":
response_data = open(os.path.join(request.doc_root,
"media",
"sound_5.oga")).read()
elif content_type == "video/mp4":
response_data = open(os.path.join(request.doc_root,
"media",
"movie_5.mp4")).read()
elif content_type == "application/javascript":
response_data = open(os.path.join(request.doc_root,
"mixed-content",
"generic",
"worker.js")).read()
else:
response_data = "/* purged */"
elif action == "take":
value = stash.take(key=key, path=path)
if value is None:
status = "allowed"
else:
status = "blocked"
response_data = json.dumps({"status": status, "result": value})
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("content-type", content_type)
response.writer.write_header("cache-control", "no-cache; must-revalidate")
response.writer.end_headers()
response.writer.write(response_data)
| mpl-2.0 | 6,159,997,418,781,841,000 | 39.735294 | 79 | 0.541757 | false |
tapanagupta/mi-instrument | mi/instrument/star_asimet/bulkmet/metbk_a/driver.py | 7 | 42844 | """
@package mi.instrument.star_asimet.bulkmet.metbk_a.driver
@file marine-integrations/mi/instrument/star_aismet/bulkmet/metbk_a/driver.py
@author Bill Bollenbacher
@brief Driver for the metbk_a
Release notes:
initial version
"""
import re
import time
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, \
InstrumentProtocolException
from mi.core.time_tools import get_timestamp_delayed
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
log = get_logger()
# newline.
NEWLINE = '\r\n'
# default timeout.
TIMEOUT = 10
SYNC_TIMEOUT = 30
AUTO_SAMPLE_SCHEDULED_JOB = 'auto_sample'
LOGGING_STATUS_REGEX = r'.*Sampling (GO|STOPPED)'
LOGGING_STATUS_COMPILED = re.compile(LOGGING_STATUS_REGEX, re.DOTALL)
LOGGING_SYNC_REGEX = r'.*Sampling GO - synchronizing...'
LOGGING_SYNC_COMPILED = re.compile(LOGGING_STATUS_REGEX, re.DOTALL)
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
ACQUIRE_STATUS = 'acquire_status'
CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
SYNC_CLOCK = 'PROTOCOL_STATE_SYNC_CLOCK'
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
GET = DriverEvent.GET
SET = DriverEvent.SET
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
FLASH_STATUS = 'DRIVER_EVENT_FLASH_STATUS'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
FLASH_STATUS = ProtocolEvent.FLASH_STATUS
START_DIRECT = ProtocolEvent.START_DIRECT
STOP_DIRECT = ProtocolEvent.STOP_DIRECT
DISCOVER = ProtocolEvent.DISCOVER
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
CLOCK = 'clock'
SAMPLE_INTERVAL = 'sample_interval'
class Prompt(BaseEnum):
"""
Device i/o prompts.
"""
CR_NL = NEWLINE
STOPPED = "Sampling STOPPED"
SYNC = "Sampling GO - synchronizing..."
GO = "Sampling GO"
FS = "bytes free\r" + NEWLINE
class Command(BaseEnum):
"""
Instrument command strings
"""
GET_CLOCK = "#CLOCK"
SET_CLOCK = "#CLOCK="
D = "#D"
FS = "#FS"
STAT = "#STAT"
GO = "#GO"
STOP = "#STOP"
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
RAW = CommonDataParticleType.RAW
METBK_PARSED = 'metbk_parsed'
METBK_STATUS = 'metbk_status'
###############################################################################
# Data Particles
###############################################################################
class METBK_SampleDataParticleKey(BaseEnum):
BAROMETRIC_PRESSURE = 'barometric_pressure'
RELATIVE_HUMIDITY = 'relative_humidity'
AIR_TEMPERATURE = 'air_temperature'
LONGWAVE_IRRADIANCE = 'longwave_irradiance'
PRECIPITATION = 'precipitation'
SEA_SURFACE_TEMPERATURE = 'sea_surface_temperature'
SEA_SURFACE_CONDUCTIVITY = 'sea_surface_conductivity'
SHORTWAVE_IRRADIANCE = 'shortwave_irradiance'
EASTWARD_WIND_VELOCITY = 'eastward_wind_velocity'
NORTHWARD_WIND_VELOCITY = 'northward_wind_velocity'
class METBK_SampleDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_PARSED
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
SAMPLE_DATA_PATTERN = (r'(-*\d+\.\d+)' + # BPR
'\s*(-*\d+\.\d+)' + # RH %
'\s*(-*\d+\.\d+)' + # RH temp
'\s*(-*\d+\.\d+)' + # LWR
'\s*(-*\d+\.\d+)' + # PRC
'\s*(-*\d+\.\d+)' + # ST
'\s*(-*\d+\.\d+)' + # SC
'\s*(-*\d+\.\d+)' + # SWR
'\s*(-*\d+\.\d+)' + # We
'\s*(-*\d+\.\d+)' + # Wn
'.*?' + NEWLINE) # throw away batteries
return re.compile(SAMPLE_DATA_PATTERN, re.DOTALL)
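    # Hedged note: the pattern above expects ten whitespace-separated floats
    # (BPR, RH %, RH temp, LWR, PRC, ST, SC, SWR, We, Wn) followed by battery
    # fields that are discarded, e.g. a record resembling
    #     "1018.91 83.6 21.17 389.7 0.0 23.68 5.25 161.7 -1.2 0.4 12.6\r\n"
    # (the sample values are illustrative only, not taken from a real instrument).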
def _build_parsed_values(self):
match = METBK_SampleDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_SampleDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.BAROMETRIC_PRESSURE,
DataParticleKey.VALUE: float(match.group(1))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.RELATIVE_HUMIDITY,
DataParticleKey.VALUE: float(match.group(2))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.AIR_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(3))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.LONGWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(4))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.PRECIPITATION,
DataParticleKey.VALUE: float(match.group(5))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_CONDUCTIVITY,
DataParticleKey.VALUE: float(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SHORTWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.EASTWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(9))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.NORTHWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(10))}]
log.debug("METBK_SampleDataParticle._build_parsed_values: result=%s" % result)
return result
class METBK_StatusDataParticleKey(BaseEnum):
INSTRUMENT_MODEL = 'instrument_model'
SERIAL_NUMBER = 'serial_number'
CALIBRATION_DATE = 'calibration_date'
FIRMWARE_VERSION = 'firmware_version'
DATE_TIME_STRING = 'date_time_string'
LOGGING_INTERVAL = 'logging_interval'
CURRENT_TICK = 'current_tick'
RECENT_RECORD_INTERVAL = 'recent_record_interval'
FLASH_CARD_PRESENCE = 'flash_card_presence'
BATTERY_VOLTAGE_MAIN = 'battery_voltage_main'
FAILURE_MESSAGES = 'failure_messages'
PTT_ID1 = 'ptt_id1'
PTT_ID2 = 'ptt_id2'
PTT_ID3 = 'ptt_id3'
SAMPLING_STATE = 'sampling_state'
class METBK_StatusDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_STATUS
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
STATUS_DATA_PATTERN = (r'Model:\s+(.+?)\r\n' +
'SerNum:\s+(.+?)\r\n' +
'CfgDat:\s+(.+?)\r\n' +
'Firmware:\s+(.+?)\r\n' +
'RTClock:\s+(.+?)\r\n' +
'Logging Interval:\s+(\d+);\s+' +
'Current Tick:\s+(\d+)\r\n' +
'R-interval:\s+(.+?)\r\n' +
'(.+?)\r\n' + # compact flash info
'Main Battery Voltage:\s+(.+?)\r\n' +
'(.+?)' + # module failures & PTT messages
'\r\nSampling\s+(\w+)\r\n')
return re.compile(STATUS_DATA_PATTERN, re.DOTALL)
def _build_parsed_values(self):
log.debug("METBK_StatusDataParticle: input = %s" % self.raw_data)
match = METBK_StatusDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_StatusDataParticle: No regex match of parsed status data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.INSTRUMENT_MODEL,
DataParticleKey.VALUE: match.group(1)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: match.group(2)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CALIBRATION_DATE,
DataParticleKey.VALUE: match.group(3)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FIRMWARE_VERSION,
DataParticleKey.VALUE: match.group(4)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.DATE_TIME_STRING,
DataParticleKey.VALUE: match.group(5)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.LOGGING_INTERVAL,
DataParticleKey.VALUE: int(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CURRENT_TICK,
DataParticleKey.VALUE: int(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.RECENT_RECORD_INTERVAL,
DataParticleKey.VALUE: int(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FLASH_CARD_PRESENCE,
DataParticleKey.VALUE: match.group(9)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.BATTERY_VOLTAGE_MAIN,
DataParticleKey.VALUE: float(match.group(10))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SAMPLING_STATE,
DataParticleKey.VALUE: match.group(12)}]
lines = match.group(11).split(NEWLINE)
length = len(lines)
        log.debug("length=%d; lines=%s" % (length, lines))
if length < 3:
raise SampleException("METBK_StatusDataParticle: Not enough PTT lines in status data: [%s]", self.raw_data)
# grab PTT lines
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID1,
DataParticleKey.VALUE: lines[length - 3]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID2,
DataParticleKey.VALUE: lines[length - 2]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID3,
DataParticleKey.VALUE: lines[length - 1]})
# grab any module failure lines
if length > 3:
length -= 3
failures = []
for index in range(0, length):
failures.append(lines[index])
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FAILURE_MESSAGES,
DataParticleKey.VALUE: failures})
log.debug("METBK_StatusDataParticle: result = %s" % result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
########################################################################
# Superclass overrides for resource query.
########################################################################
def get_resource_params(self):
"""
Return list of device parameters available.
"""
return Parameter.list()
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
last_sample = ''
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE,
self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC,
self._handler_command_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS,
self._handler_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_SAMPLE,
self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.CLOCK_SYNC,
self._handler_autosample_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_STATUS,
self._handler_acquire_status)
        # We set up a separate state for clock sync so that the state machine disables the autosample scheduler
        # before we try to sync the clock. Otherwise a race condition could be introduced when we
        # are syncing the clock and the scheduler requests a sample.
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.ENTER, self._handler_sync_clock_enter)
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.CLOCK_SYNC,
self._handler_sync_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
self._handler_direct_access_execute_direct)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
self._handler_direct_access_stop_direct)
# Add build handlers for device commands.
self._add_build_handler(Command.GET_CLOCK, self._build_simple_command)
self._add_build_handler(Command.SET_CLOCK, self._build_set_clock_command)
self._add_build_handler(Command.D, self._build_simple_command)
self._add_build_handler(Command.GO, self._build_simple_command)
self._add_build_handler(Command.STOP, self._build_simple_command)
self._add_build_handler(Command.FS, self._build_simple_command)
self._add_build_handler(Command.STAT, self._build_simple_command)
# Add response handlers for device commands.
self._add_response_handler(Command.GET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.SET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.FS, self._parse_fs_response)
self._add_response_handler(Command.STAT, self._parse_common_response)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
self._chunker = StringChunker(Protocol.sieve_function)
self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples and status
"""
matchers = []
return_list = []
matchers.append(METBK_SampleDataParticle.regex_compiled())
matchers.append(METBK_StatusDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
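    # Note (assumption about framework behavior): the StringChunker calls this
    # sieve with the accumulated raw buffer and uses the returned (start, end)
    # index pairs to slice out complete sample/status records, which are then
    # handed one at a time to _got_chunk() below.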
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
log.debug("_got_chunk: chunk=%s" % chunk)
self._extract_sample(METBK_SampleDataParticle, METBK_SampleDataParticle.regex_compiled(), chunk, timestamp)
self._extract_sample(METBK_StatusDataParticle, METBK_StatusDataParticle.regex_compiled(), chunk, timestamp)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# override methods from base class.
########################################################################
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
"""
Overridden to add duplicate sample checking. This duplicate checking should only be performed
on sample chunks and not other chunk types, therefore the regex is performed before the string checking.
Extract sample from a response line if present and publish parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample
@param line string to match for sample.
@param timestamp port agent timestamp to include with the particle
@param publish boolean to publish samples (default True). If True,
two different events are published: one to notify raw data and
the other to notify parsed data.
        @retval the parsed sample particle dictionary if the line matches a
        sample and is not a duplicate of the previous sample. Otherwise, None.
@todo Figure out how the agent wants the results for a single poll
and return them that way from here
"""
match = regex.match(line)
if match:
if particle_class == METBK_SampleDataParticle:
# check to see if there is a delta from last sample, and don't parse this sample if there isn't
if match.group(0) == self.last_sample:
return
# save this sample as last_sample for next check
self.last_sample = match.group(0)
particle = particle_class(line, port_timestamp=timestamp)
parsed_sample = particle.generate()
if publish and self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
return parsed_sample
########################################################################
# implement virtual methods from base class.
########################################################################
def apply_startup_params(self):
"""
Apply sample_interval startup parameter.
"""
config = self.get_startup_config()
log.debug("apply_startup_params: startup config = %s" % config)
        if Parameter.SAMPLE_INTERVAL in config:
log.debug("apply_startup_params: setting sample_interval to %d" % config[Parameter.SAMPLE_INTERVAL])
self._param_dict.set_value(Parameter.SAMPLE_INTERVAL, config[Parameter.SAMPLE_INTERVAL])
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
        Discover current state by querying the instrument logging status (AUTOSAMPLE if sampling, otherwise COMMAND).
"""
next_state = self._discover()
result = []
return next_state, (next_state, result)
########################################################################
# Clock Sync handlers.
# Not much to do in this state except sync the clock then transition
# back to autosample. When in command mode we don't have to worry about
# stopping the scheduler so we just sync the clock without state
# transitions
########################################################################
def _handler_sync_clock_enter(self, *args, **kwargs):
"""
Enter sync clock state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._protocol_fsm.on_event(ProtocolEvent.CLOCK_SYNC)
def _handler_sync_clock_sync(self, *args, **kwargs):
"""
Sync the clock
"""
next_state = ProtocolState.AUTOSAMPLE
result = []
self._sync_clock()
self._async_agent_state_change(ResourceAgentState.STREAMING)
return next_state, (next_state, result)
########################################################################
# Command handlers.
# just implemented to make DA possible, instrument has no actual command mode
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
self._init_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_set(self, *args, **kwargs):
"""
        No writable parameters, so this does nothing; implemented only to satisfy the framework.
"""
next_state = None
result = None
return next_state, result
def _handler_command_start_direct(self, *args, **kwargs):
"""
"""
next_state = ProtocolState.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
"""
next_state = ProtocolState.AUTOSAMPLE
result = []
self._start_logging()
return next_state, (next_state, result)
def _handler_command_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = []
self._sync_clock()
return next_state, (next_state, result)
########################################################################
# autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
        Enter autosample state. Because this is an instrument that must be
polled we need to ensure the scheduler is added when we are in an
autosample state. This scheduler raises events to poll the
instrument for data.
"""
self._init_params()
self._ensure_autosample_config()
self._add_scheduler_event(AUTO_SAMPLE_SCHEDULED_JOB, ProtocolEvent.ACQUIRE_SAMPLE)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
exit autosample state.
"""
self._remove_scheduler(AUTO_SAMPLE_SCHEDULED_JOB)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
next_state = ProtocolState.COMMAND
result = []
self._stop_logging()
return next_state, (next_state, result)
def _handler_autosample_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = ProtocolState.SYNC_CLOCK
result = []
return next_state, (next_state, result)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state.
"""
pass
def _handler_direct_access_execute_direct(self, data):
next_state = None
result = []
self._do_cmd_direct(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
next_state = self._discover()
result = []
return next_state, (next_state, result)
########################################################################
# general handlers.
########################################################################
def _handler_flash_status(self, *args, **kwargs):
"""
Acquire flash status from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = self._do_cmd_resp(Command.FS, expected_prompt=Prompt.FS)
log.debug("FLASH RESULT: %s", result)
return next_state, (next_state, [result])
def _handler_acquire_sample(self, *args, **kwargs):
"""
Acquire sample from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = self._do_cmd_resp(Command.D, *args, **kwargs)
return next_state, (next_state, [result])
def _handler_acquire_status(self, *args, **kwargs):
"""
Acquire status from instrument.
@retval (next_state, (next_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
log.debug("Logging status: %s", self._is_logging())
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
return next_state, (next_state, [result])
########################################################################
# Private helpers.
########################################################################
def _set_params(self, *args, **kwargs):
"""
        Overridden from the base class; used when applying direct access
        parameters. Not needed here, so it is a no-op.
"""
pass
def _discover(self, *args, **kwargs):
"""
        Discover current state by checking the instrument logging status.
        @retval ProtocolState.AUTOSAMPLE if logging, ProtocolState.COMMAND if stopped,
        or ProtocolState.UNKNOWN if the logging status cannot be determined.
"""
logging = self._is_logging()
if logging is None:
return ProtocolState.UNKNOWN
elif logging:
return ProtocolState.AUTOSAMPLE
return ProtocolState.COMMAND
def _start_logging(self):
"""
        start the instrument logging if it isn't running already.
"""
if not self._is_logging():
log.debug("Sending start logging command: %s", Command.GO)
self._do_cmd_resp(Command.GO, expected_prompt=Prompt.GO)
def _stop_logging(self):
"""
        stop the instrument logging if it is running. When the instrument
        is in a syncing state we cannot stop logging, so we must wait before
        we send the stop command.
"""
if self._is_logging():
log.debug("Attempting to stop the instrument logging.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
# If we are still logging then let's wait until we are not
# syncing before resending the command.
if self._is_logging():
self._wait_for_sync()
log.debug("Attempting to stop the instrument again.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
def _wait_for_sync(self):
"""
When the instrument is syncing internal parameters we can't stop
logging. So we will watch the logging status and when it is not
synchronizing we will return. Basically we will just block
until we are no longer syncing.
@raise InstrumentProtocolException when we timeout waiting for a
transition.
"""
timeout = time.time() + SYNC_TIMEOUT
while time.time() < timeout:
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
match = LOGGING_SYNC_COMPILED.match(result)
if match:
log.debug("We are still in sync mode. Wait a bit and retry")
time.sleep(2)
else:
log.debug("Transitioned out of sync.")
return True
# We timed out
raise InstrumentProtocolException("failed to transition out of sync mode")
def _is_logging(self):
"""
Run the status command to determine if we are in command or autosample
mode.
@return: True if sampling, false if not, None if we can't determine
"""
log.debug("_is_logging: start")
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
log.debug("Checking logging status from %s", result)
match = LOGGING_STATUS_COMPILED.match(result)
if not match:
log.error("Unable to determine logging status from: %s", result)
return None
if match.group(1) == 'GO':
log.debug("Looks like we are logging: %s", match.group(1))
return True
else:
log.debug("Looks like we are NOT logging: %s", match.group(1))
return False
def _ensure_autosample_config(self):
scheduler_config = self._get_scheduler_config()
        if scheduler_config is None:
log.debug("_ensure_autosample_config: adding scheduler element to _startup_config")
self._startup_config[DriverConfigKey.SCHEDULER] = {}
self._get_scheduler_config()
log.debug("_ensure_autosample_config: adding autosample config to _startup_config")
config = {DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)}}
self._startup_config[DriverConfigKey.SCHEDULER][AUTO_SAMPLE_SCHEDULED_JOB] = config
        if not self._scheduler:
self.initialize_scheduler()
def _sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
time_format = "%Y/%m/%d %H:%M:%S"
str_val = get_timestamp_delayed(time_format)
log.debug("Setting instrument clock to '%s'", str_val)
self._do_cmd_resp(Command.SET_CLOCK, str_val, expected_prompt=Prompt.CR_NL)
def _wakeup(self, timeout):
"""There is no wakeup sequence for this instrument"""
pass
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
self._cmd_dict.add(Capability.FLASH_STATUS, display_name="Flash Status")
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
def _build_param_dict(self):
"""
Populate the parameter dictionary with XR-420 parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.CLOCK,
r'(.*)\r\n',
lambda match: match.group(1),
lambda string: str(string),
type=ParameterDictType.STRING,
display_name="clock",
expiration=0,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.SAMPLE_INTERVAL,
r'Not used. This parameter is not parsed from instrument response',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=30,
value=30,
startup_param=True,
display_name="sample_interval",
visibility=ParameterDictVisibility.IMMUTABLE)
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary.
"""
log.debug("_update_params:")
# Issue clock command and parse results.
# This is the only parameter and it is always changing so don't bother with the 'change' event
self._do_cmd_resp(Command.GET_CLOCK)
def _build_set_clock_command(self, cmd, val):
"""
Build handler for set clock command (cmd=val followed by newline).
@param cmd the string for setting the clock (this should equal #CLOCK=).
@param val the parameter value to set.
@ retval The set command to be sent to the device.
"""
cmd = '%s%s' % (cmd, val) + NEWLINE
return cmd
def _parse_clock_response(self, response, prompt):
"""
Parse handler for clock command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if clock command misunderstood.
"""
log.debug("_parse_clock_response: response=%s, prompt=%s" % (response, prompt))
if prompt not in [Prompt.CR_NL]:
raise InstrumentProtocolException('CLOCK command not recognized: %s.' % response)
if not self._param_dict.update(response):
raise InstrumentProtocolException('CLOCK command not parsed: %s.' % response)
return
def _parse_fs_response(self, response, prompt):
"""
Parse handler for FS command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if FS command misunderstood.
"""
log.debug("_parse_fs_response: response=%s, prompt=%s" % (response, prompt))
if prompt not in [Prompt.FS]:
raise InstrumentProtocolException('FS command not recognized: %s.' % response)
return response
def _parse_common_response(self, response, prompt):
"""
Parse handler for common commands.
@param response command response string.
@param prompt prompt following command response.
"""
return response
| bsd-2-clause | 8,993,199,238,074,514,000 | 40.475315 | 120 | 0.59747 | false |
ABaldwinHunter/django-clone-classic | django/test/html.py | 220 | 7928 | """
Comparing two html documents.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html_parser import HTMLParseError, HTMLParser
WHITESPACE = re.compile('\s+')
def normalize_whitespace(string):
return WHITESPACE.sub(' ', string)
@python_2_unicode_compatible
class Element(object):
def __init__(self, name, attributes):
self.name = name
self.attributes = sorted(attributes)
self.children = []
def append(self, element):
if isinstance(element, six.string_types):
element = force_text(element)
element = normalize_whitespace(element)
if self.children:
if isinstance(self.children[-1], six.string_types):
self.children[-1] += element
self.children[-1] = normalize_whitespace(self.children[-1])
return
elif self.children:
            # removing last child if it is only whitespace
# this can result in incorrect dom representations since
# whitespace between inline tags like <span> is significant
if isinstance(self.children[-1], six.string_types):
if self.children[-1].isspace():
self.children.pop()
if element:
self.children.append(element)
def finalize(self):
def rstrip_last_element(children):
if children:
if isinstance(children[-1], six.string_types):
children[-1] = children[-1].rstrip()
if not children[-1]:
children.pop()
children = rstrip_last_element(children)
return children
rstrip_last_element(self.children)
for i, child in enumerate(self.children):
if isinstance(child, six.string_types):
self.children[i] = child.strip()
elif hasattr(child, 'finalize'):
child.finalize()
def __eq__(self, element):
if not hasattr(element, 'name'):
return False
if hasattr(element, 'name') and self.name != element.name:
return False
if len(self.attributes) != len(element.attributes):
return False
if self.attributes != element.attributes:
# attributes without a value is same as attribute with value that
# equals the attributes name:
# <input checked> == <input checked="checked">
for i in range(len(self.attributes)):
attr, value = self.attributes[i]
other_attr, other_value = element.attributes[i]
if value is None:
value = attr
if other_value is None:
other_value = other_attr
if attr != other_attr or value != other_value:
return False
if self.children != element.children:
return False
return True
def __hash__(self):
return hash((self.name,) + tuple(a for a in self.attributes))
def __ne__(self, element):
return not self.__eq__(element)
def _count(self, element, count=True):
if not isinstance(element, six.string_types):
if self == element:
return 1
i = 0
for child in self.children:
# child is text content and element is also text content, then
# make a simple "text" in "text"
if isinstance(child, six.string_types):
if isinstance(element, six.string_types):
if count:
i += child.count(element)
elif element in child:
return 1
else:
i += child._count(element, count=count)
if not count and i:
return i
return i
def __contains__(self, element):
return self._count(element, count=False) > 0
def count(self, element):
return self._count(element, count=True)
def __getitem__(self, key):
return self.children[key]
def __str__(self):
output = '<%s' % self.name
for key, value in self.attributes:
if value:
output += ' %s="%s"' % (key, value)
else:
output += ' %s' % key
if self.children:
output += '>\n'
output += ''.join(six.text_type(c) for c in self.children)
output += '\n</%s>' % self.name
else:
output += ' />'
return output
def __repr__(self):
return six.text_type(self)
@python_2_unicode_compatible
class RootElement(Element):
def __init__(self):
super(RootElement, self).__init__(None, ())
def __str__(self):
return ''.join(six.text_type(c) for c in self.children)
class Parser(HTMLParser):
SELF_CLOSING_TAGS = ('br', 'hr', 'input', 'img', 'meta', 'spacer',
'link', 'frame', 'base', 'col')
def __init__(self):
HTMLParser.__init__(self)
self.root = RootElement()
self.open_tags = []
self.element_positions = {}
def error(self, msg):
raise HTMLParseError(msg, self.getpos())
def format_position(self, position=None, element=None):
if not position and element:
position = self.element_positions[element]
if position is None:
position = self.getpos()
if hasattr(position, 'lineno'):
position = position.lineno, position.offset
return 'Line %d, Column %d' % position
@property
def current(self):
if self.open_tags:
return self.open_tags[-1]
else:
return self.root
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
if tag not in self.SELF_CLOSING_TAGS:
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs):
# Special case handling of 'class' attribute, so that comparisons of DOM
# instances are not sensitive to ordering of classes.
attrs = [
(name, " ".join(sorted(value.split(" "))))
if name == "class"
else (name, value)
for name, value in attrs
]
element = Element(tag, attrs)
self.current.append(element)
if tag not in self.SELF_CLOSING_TAGS:
self.open_tags.append(element)
self.element_positions[element] = self.getpos()
def handle_endtag(self, tag):
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
while element.name != tag:
if not self.open_tags:
self.error("Unexpected end tag `%s` (%s)" % (
tag, self.format_position()))
element = self.open_tags.pop()
def handle_data(self, data):
self.current.append(data)
def handle_charref(self, name):
self.current.append('&%s;' % name)
def handle_entityref(self, name):
self.current.append('&%s;' % name)
def parse_html(html):
"""
Takes a string that contains *valid* HTML and turns it into a Python object
structure that can be easily compared against other HTML on semantic
  equivalence. Syntactic differences, such as which quotation marks are used
  around attribute values, are ignored.
"""
parser = Parser()
parser.feed(html)
parser.close()
document = parser.root
document.finalize()
  # Remove the root element if it is not necessary.
if len(document.children) == 1:
if not isinstance(document.children[0], six.string_types):
document = document.children[0]
return document
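# --- Illustrative usage (added sketch, not part of the original module) ---
# The helper below is hypothetical; it only demonstrates how parse_html() is
# meant to be used for semantic comparison of HTML fragments.
def _parse_html_usage_example():
  dom1 = parse_html('<p class="b a">hi</p>')
  dom2 = parse_html("<p class='a b'>hi</p>")
  # Quoting style and class ordering are normalised, so the two parse equal.
  assert dom1 == dom2
  # Containment and counting operate on the parsed text content.
  assert 'hi' in dom1 and dom1.count('hi') == 1
  return dom1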
| bsd-3-clause | -8,827,906,159,620,779,000 | 32.59322 | 80 | 0.558274 | false |
michael-dev2rights/ansible | lib/ansible/modules/network/nxos/nxos_bgp_af.py | 22 | 29498 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bgp_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP Address-family configuration.
description:
- Manages BGP Address-family configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
    - C(state=absent) removes the whole BGP address-family configuration
- Default, where supported, restores params default value.
options:
asn:
description:
- BGP autonomous system number. Valid values are String,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
required: true
afi:
description:
- Address Family Identifier.
required: true
choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']
safi:
description:
- Sub Address Family Identifier.
required: true
choices: ['unicast','multicast', 'evpn']
additional_paths_install:
description:
- Install a backup path into the forwarding table and provide
prefix independent convergence (PIC) in case of a PE-CE link
failure.
required: false
choices: ['true','false']
default: null
additional_paths_receive:
description:
- Enables the receive capability of additional paths for all of
the neighbors under this address family for which the capability
has not been disabled.
required: false
choices: ['true','false']
default: null
additional_paths_selection:
description:
- Configures the capability of selecting additional paths for
a prefix. Valid values are a string defining the name of
the route-map.
required: false
default: null
additional_paths_send:
description:
- Enables the send capability of additional paths for all of
the neighbors under this address family for which the capability
has not been disabled.
required: false
choices: ['true','false']
default: null
advertise_l2vpn_evpn:
description:
- Advertise evpn routes.
required: false
choices: ['true','false']
default: null
client_to_client:
description:
- Configure client-to-client route reflection.
required: false
choices: ['true','false']
default: null
dampen_igp_metric:
description:
- Specify dampen value for IGP metric-related changes, in seconds.
Valid values are integer and keyword 'default'.
required: false
default: null
dampening_state:
description:
- Enable/disable route-flap dampening.
required: false
choices: ['true','false']
default: null
dampening_half_time:
description:
- Specify decay half-life in minutes for route-flap dampening.
Valid values are integer and keyword 'default'.
required: false
default: null
dampening_max_suppress_time:
description:
- Specify max suppress time for route-flap dampening stable route.
Valid values are integer and keyword 'default'.
required: false
default: null
dampening_reuse_time:
description:
- Specify route reuse time for route-flap dampening.
Valid values are integer and keyword 'default'.
required: false
dampening_routemap:
description:
- Specify route-map for route-flap dampening. Valid values are a
string defining the name of the route-map.
required: false
default: null
dampening_suppress_time:
description:
- Specify route suppress time for route-flap dampening.
Valid values are integer and keyword 'default'.
required: false
default: null
default_information_originate:
description:
- Default information originate.
required: false
choices: ['true','false']
default: null
default_metric:
description:
- Sets default metrics for routes redistributed into BGP.
Valid values are Integer or keyword 'default'
required: false
default: null
distance_ebgp:
description:
- Sets the administrative distance for eBGP routes.
Valid values are Integer or keyword 'default'.
required: false
default: null
distance_ibgp:
description:
- Sets the administrative distance for iBGP routes.
Valid values are Integer or keyword 'default'.
required: false
default: null
distance_local:
description:
- Sets the administrative distance for local BGP routes.
Valid values are Integer or keyword 'default'.
required: false
default: null
inject_map:
description:
- An array of route-map names which will specify prefixes to
inject. Each array entry must first specify the inject-map name,
secondly an exist-map name, and optionally the copy-attributes
keyword which indicates that attributes should be copied from
the aggregate. For example [['lax_inject_map', 'lax_exist_map'],
['nyc_inject_map', 'nyc_exist_map', 'copy-attributes'],
['fsd_inject_map', 'fsd_exist_map']].
required: false
default: null
maximum_paths:
description:
- Configures the maximum number of equal-cost paths for
load sharing. Valid value is an integer in the range 1-64.
default: null
maximum_paths_ibgp:
description:
- Configures the maximum number of ibgp equal-cost paths for
load sharing. Valid value is an integer in the range 1-64.
required: false
default: null
networks:
description:
- Networks to configure. Valid value is a list of network
prefixes to advertise. The list must be in the form of an array.
Each entry in the array must include a prefix address and an
optional route-map. For example [['10.0.0.0/16', 'routemap_LA'],
['192.168.1.1', 'Chicago'], ['192.168.2.0/24'],
['192.168.3.0/24', 'routemap_NYC']].
required: false
default: null
next_hop_route_map:
description:
- Configure a route-map for valid nexthops. Valid values are a
string defining the name of the route-map.
required: false
default: null
redistribute:
description:
- A list of redistribute directives. Multiple redistribute entries
are allowed. The list must be in the form of a nested array.
the first entry of each array defines the source-protocol to
redistribute from; the second entry defines a route-map name.
A route-map is highly advised but may be optional on some
platforms, in which case it may be omitted from the array list.
For example [['direct', 'rm_direct'], ['lisp', 'rm_lisp']].
required: false
default: null
suppress_inactive:
description:
- Advertises only active routes to peers.
required: false
choices: ['true','false']
default: null
table_map:
description:
- Apply table-map to filter routes downloaded into URIB.
Valid values are a string.
required: false
default: null
table_map_filter:
description:
- Filters routes rejected by the route-map and does not download
them to the RIB.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# configure a simple address-family
- nxos_bgp_af:
asn: 65535
vrf: TESTING
afi: ipv4
safi: unicast
advertise_l2vpn_evpn: true
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf TESTING",
"address-family ipv4 unicast", "advertise l2vpn evpn"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = [
'additional_paths_install',
'additional_paths_receive',
'additional_paths_send',
'advertise_l2vpn_evpn',
'dampening_state',
'default_information_originate',
'suppress_inactive',
]
PARAM_TO_DEFAULT_KEYMAP = {
'maximum_paths': '1',
'maximum_paths_ibgp': '1',
'client_to_client': True,
'distance_ebgp': '20',
'distance_ibgp': '200',
'distance_local': '220',
'dampen_igp_metric': '600'
}
PARAM_TO_COMMAND_KEYMAP = {
'asn': 'router bgp',
'afi': 'address-family',
'safi': 'address-family',
'additional_paths_install': 'additional-paths install backup',
'additional_paths_receive': 'additional-paths receive',
'additional_paths_selection': 'additional-paths selection route-map',
'additional_paths_send': 'additional-paths send',
'advertise_l2vpn_evpn': 'advertise l2vpn evpn',
'client_to_client': 'client-to-client reflection',
'dampen_igp_metric': 'dampen-igp-metric',
'dampening_state': 'dampening',
'dampening_half_time': 'dampening',
'dampening_max_suppress_time': 'dampening',
'dampening_reuse_time': 'dampening',
'dampening_routemap': 'dampening route-map',
'dampening_suppress_time': 'dampening',
'default_information_originate': 'default-information originate',
'default_metric': 'default-metric',
'distance_ebgp': 'distance',
'distance_ibgp': 'distance',
'distance_local': 'distance',
'inject_map': 'inject-map',
'maximum_paths': 'maximum-paths',
'maximum_paths_ibgp': 'maximum-paths ibgp',
'networks': 'network',
'redistribute': 'redistribute',
'next_hop_route_map': 'nexthop route-map',
'suppress_inactive': 'suppress-inactive',
'table_map': 'table-map',
'table_map_filter': 'table-map-filter',
'vrf': 'vrf'
}
DAMPENING_PARAMS = [
'dampening_half_time',
'dampening_suppress_time',
'dampening_reuse_time',
'dampening_max_suppress_time'
]
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP[arg]
command_val_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
has_command_val = command_val_re.search(config)
if arg == 'inject_map':
inject_re = r'.*inject-map\s(?P<inject_map>\S+)\sexist-map\s(?P<exist_map>\S+)-*'
value = []
match_inject = re.match(inject_re, config, re.DOTALL)
if match_inject:
inject_group = match_inject.groupdict()
inject_map = inject_group['inject_map']
exist_map = inject_group['exist_map']
value.append(inject_map)
value.append(exist_map)
inject_map_command = ('inject-map {0} exist-map {1} '
'copy-attributes'.format(
inject_group['inject_map'],
inject_group['exist_map']))
inject_re = re.compile(r'\s+{0}\s*$'.format(inject_map_command), re.M)
if inject_re.search(config):
value.append('copy_attributes')
elif arg == 'networks':
value = []
for network in command_val_re.findall(config):
value.append(network.split())
elif arg == 'redistribute':
value = []
if has_command_val:
value = has_command_val.group('value').split()
if value:
if len(value) == 3:
value.pop(1)
elif len(value) == 4:
value = ['{0} {1}'.format(value[0], value[1]), value[3]]
elif command == 'distance':
distance_re = r'.*distance\s(?P<d_ebgp>\w+)\s(?P<d_ibgp>\w+)\s(?P<d_local>\w+)'
match_distance = re.match(distance_re, config, re.DOTALL)
value = ''
if match_distance:
distance_group = match_distance.groupdict()
if arg == 'distance_ebgp':
value = distance_group['d_ebgp']
elif arg == 'distance_ibgp':
value = distance_group['d_ibgp']
elif arg == 'distance_local':
value = distance_group['d_local']
elif command.split()[0] == 'dampening':
value = ''
if arg == 'dampen_igp_metric' or arg == 'dampening_routemap':
if command in config:
value = has_command_val.group('value')
else:
dampening_re = r'.*dampening\s(?P<half>\w+)\s(?P<reuse>\w+)\s(?P<suppress>\w+)\s(?P<max_suppress>\w+)'
match_dampening = re.match(dampening_re, config, re.DOTALL)
if match_dampening:
dampening_group = match_dampening.groupdict()
if arg == 'dampening_half_time':
value = dampening_group['half']
elif arg == 'dampening_reuse_time':
value = dampening_group['reuse']
elif arg == 'dampening_suppress_time':
value = dampening_group['suppress']
elif arg == 'dampening_max_suppress_time':
value = dampening_group['max_suppress']
elif arg == 'table_map_filter':
tmf_regex = re.compile(r'\s+table-map.*filter$', re.M)
value = False
if tmf_regex.search(config):
value = True
elif arg == 'table_map':
tm_regex = re.compile(r'(?:table-map\s)(?P<value>\S+)(\sfilter)?$', re.M)
has_tablemap = tm_regex.search(config)
value = ''
if has_tablemap:
value = has_tablemap.group('value')
elif arg == 'client_to_client':
no_command_re = re.compile(r'^\s+no\s{0}\s*$'.format(command), re.M)
value = True
if no_command_re.search(config):
value = False
elif arg in BOOL_PARAMS:
command_re = re.compile(r'^\s+{0}\s*$'.format(command), re.M)
value = False
if command_re.search(config):
value = True
else:
value = ''
if has_command_val:
value = has_command_val.group('value')
return value
def get_existing(module, args, warnings):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
asn_regex = re.compile(r'.*router\sbgp\s(?P<existing_asn>\d+).*', re.DOTALL)
match_asn = asn_regex.match(str(netcfg))
if match_asn:
existing_asn = match_asn.group('existing_asn')
parents = ["router bgp {0}".format(existing_asn)]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('address-family {0} {1}'.format(module.params['afi'], module.params['safi']))
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg not in ['asn', 'afi', 'safi', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['asn'] = existing_asn
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
existing['vrf'] = module.params['vrf']
else:
warnings.append("The BGP process {0} didn't exist but the task just created it.".format(module.params['asn']))
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = value
return new_dict
def fix_proposed(module, proposed, existing):
commands = list()
command = ''
fixed_proposed = {}
for key, value in proposed.items():
if key in DAMPENING_PARAMS:
if value != 'default':
command = 'dampening {0} {1} {2} {3}'.format(
proposed.get('dampening_half_time'),
proposed.get('dampening_reuse_time'),
proposed.get('dampening_suppress_time'),
proposed.get('dampening_max_suppress_time'))
else:
if existing.get(key):
command = ('no dampening {0} {1} {2} {3}'.format(
existing['dampening_half_time'],
existing['dampening_reuse_time'],
existing['dampening_suppress_time'],
existing['dampening_max_suppress_time']))
if 'default' in command:
command = ''
elif key.startswith('distance'):
command = 'distance {0} {1} {2}'.format(
proposed.get('distance_ebgp'),
proposed.get('distance_ibgp'),
proposed.get('distance_local'))
else:
fixed_proposed[key] = value
if command:
if command not in commands:
commands.append(command)
return fixed_proposed, commands
def default_existing(existing_value, key, value):
commands = []
if key == 'network':
for network in existing_value:
if len(network) == 2:
commands.append('no network {0} route-map {1}'.format(
network[0], network[1]))
elif len(network) == 1:
commands.append('no network {0}'.format(
network[0]))
elif key == 'inject-map':
for maps in existing_value:
if len(maps) == 2:
commands.append('no inject-map {0} exist-map {1}'.format(maps[0], maps[1]))
elif len(maps) == 3:
commands.append('no inject-map {0} exist-map {1} '
'copy-attributes'.format(maps[0], maps[1]))
else:
commands.append('no {0} {1}'.format(key, existing_value))
return commands
def get_network_command(existing, key, value):
commands = []
existing_networks = existing.get('networks', [])
for inet in value:
if not isinstance(inet, list):
inet = [inet]
if inet not in existing_networks:
if len(inet) == 1:
command = '{0} {1}'.format(key, inet[0])
elif len(inet) == 2:
command = '{0} {1} route-map {2}'.format(key, inet[0], inet[1])
commands.append(command)
return commands
def get_inject_map_command(existing, key, value):
commands = []
existing_maps = existing.get('inject_map', [])
for maps in value:
if not isinstance(maps, list):
maps = [maps]
if maps not in existing_maps:
if len(maps) == 2:
command = ('inject-map {0} exist-map {1}'.format(
maps[0], maps[1]))
elif len(maps) == 3:
command = ('inject-map {0} exist-map {1} '
'copy-attributes'.format(maps[0],
maps[1]))
commands.append(command)
return commands
def get_redistribute_command(existing, key, value):
commands = []
for rule in value:
if rule[1] == 'default':
existing_rule = existing.get('redistribute', [])
for each_rule in existing_rule:
if rule[0] in each_rule:
command = 'no {0} {1} route-map {2}'.format(
key, each_rule[0], each_rule[1])
commands.append(command)
else:
command = '{0} {1} route-map {2}'.format(key, rule[0], rule[1])
commands.append(command)
return commands
def get_table_map_command(module, existing, key, value):
commands = []
if key == 'table-map':
if value != 'default':
command = '{0} {1}'.format(key, module.params['table_map'])
if (module.params['table_map_filter'] is not None and
module.params['table_map_filter'] != 'default'):
command += ' filter'
commands.append(command)
else:
if existing.get('table_map'):
command = 'no {0} {1}'.format(key, existing.get('table_map'))
commands.append(command)
return commands
def get_default_table_map_filter(existing):
commands = []
existing_table_map_filter = existing.get('table_map_filter')
if existing_table_map_filter:
existing_table_map = existing.get('table_map')
if existing_table_map:
command = 'table-map {0}'.format(existing_table_map)
commands.append(command)
return commands
def state_present(module, existing, proposed, candidate):
fixed_proposed, commands = fix_proposed(module, proposed, existing)
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, fixed_proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key == 'address-family':
addr_family_command = "address-family {0} {1}".format(
module.params['afi'], module.params['safi'])
if addr_family_command not in commands:
commands.append(addr_family_command)
elif key.startswith('table-map'):
table_map_commands = get_table_map_command(module, existing, key, value)
if table_map_commands:
commands.extend(table_map_commands)
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if key in PARAM_TO_DEFAULT_KEYMAP:
commands.append('{0} {1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key]))
elif existing_commands.get(key):
if key == 'table-map-filter':
default_tmf_command = get_default_table_map_filter(existing)
if default_tmf_command:
commands.extend(default_tmf_command)
else:
existing_value = existing_commands.get(key)
default_command = default_existing(existing_value, key, value)
if default_command:
commands.extend(default_command)
else:
if key == 'network':
network_commands = get_network_command(existing, key, value)
if network_commands:
commands.extend(network_commands)
elif key == 'inject-map':
inject_map_commands = get_inject_map_command(existing, key, value)
if inject_map_commands:
commands.extend(inject_map_commands)
elif key == 'redistribute':
redistribute_commands = get_redistribute_command(existing, key, value)
if redistribute_commands:
commands.extend(redistribute_commands)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
addr_family_command = "address-family {0} {1}".format(module.params['afi'],
module.params['safi'])
parents.append(addr_family_command)
if addr_family_command in commands:
commands.remove(addr_family_command)
candidate.add(commands, parents=parents)
def state_absent(module, candidate):
commands = []
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
commands.append('no address-family {0} {1}'.format(
module.params['afi'], module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
safi=dict(required=True, type='str', choices=['unicast', 'multicast', 'evpn']),
afi=dict(required=True, type='str', choices=['ipv4', 'ipv6', 'vpnv4', 'vpnv6', 'l2vpn']),
additional_paths_install=dict(required=False, type='bool'),
additional_paths_receive=dict(required=False, type='bool'),
additional_paths_selection=dict(required=False, type='str'),
additional_paths_send=dict(required=False, type='bool'),
advertise_l2vpn_evpn=dict(required=False, type='bool'),
client_to_client=dict(required=False, type='bool'),
dampen_igp_metric=dict(required=False, type='str'),
dampening_state=dict(required=False, type='bool'),
dampening_half_time=dict(required=False, type='str'),
dampening_max_suppress_time=dict(required=False, type='str'),
dampening_reuse_time=dict(required=False, type='str'),
dampening_routemap=dict(required=False, type='str'),
dampening_suppress_time=dict(required=False, type='str'),
default_information_originate=dict(required=False, type='bool'),
default_metric=dict(required=False, type='str'),
distance_ebgp=dict(required=False, type='str'),
distance_ibgp=dict(required=False, type='str'),
distance_local=dict(required=False, type='str'),
inject_map=dict(required=False, type='list'),
maximum_paths=dict(required=False, type='str'),
maximum_paths_ibgp=dict(required=False, type='str'),
networks=dict(required=False, type='list'),
next_hop_route_map=dict(required=False, type='str'),
redistribute=dict(required=False, type='list'),
suppress_inactive=dict(required=False, type='bool'),
table_map=dict(required=False, type='str'),
table_map_filter=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present', required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=[DAMPENING_PARAMS, ['distance_ibgp', 'distance_ebgp', 'distance_local']],
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
if module.params['dampening_routemap']:
for param in DAMPENING_PARAMS:
if module.params[param]:
module.fail_json(msg='dampening_routemap cannot be used with'
' the {0} param'.format(param))
if module.params['advertise_l2vpn_evpn']:
if module.params['vrf'] == 'default':
module.fail_json(msg='It is not possible to advertise L2VPN '
'EVPN in the default VRF. Please specify '
'another one.', vrf=module.params['vrf'])
if module.params['table_map_filter'] and not module.params['table_map']:
        module.fail_json(msg='table_map param is needed when using'
                             ' table_map_filter.')
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args, warnings)
if existing.get('asn') and state == 'present':
if existing.get('asn') != module.params['asn']:
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
for arg in ['networks', 'inject_map']:
if proposed_args.get(arg):
if proposed_args[arg][0] == 'default':
proposed_args[arg] = 'default'
proposed = {}
for key, value in proposed_args.items():
if key not in ['asn', 'vrf']:
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key, 'default')
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, candidate)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,644,856,589,476,007,000 | 35.238329 | 118 | 0.595905 | false |
piffey/ansible | test/units/modules/cloud/amazon/test_s3_bucket.py | 137 | 5360 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# (c) 2017 Red Hat Inc.
from ansible.modules.cloud.amazon.s3_bucket import compare_policies
small_policy_one = {
'Version': '2012-10-17',
'Statement': [
{
'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}
]
}
# The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled
small_policy_two = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': 's3:PutObjectAcl',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']},
'Resource': ['arn:aws:s3:::test_policy/*'],
'Sid': 'AddCannedAcl2'
}
]
}
larger_policy_one = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Test",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
"arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
]
},
"Action": "s3:PutObjectAcl",
"Resource": "arn:aws:s3:::test_policy/*"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
},
"Action": [
"s3:PutObject",
"s3:PutObjectAcl"
],
"Resource": "arn:aws:s3:::test_policy/*"
}
]
}
# The same as larger_policy_one, except having a list of length 1 and jumbled contents
larger_policy_two = {
"Version": "2012-10-17",
"Statement": [
{
"Principal": {
"AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
},
"Effect": "Allow",
"Resource": "arn:aws:s3:::test_policy/*",
"Action": [
"s3:PutObject",
"s3:PutObjectAcl"
]
},
{
"Action": "s3:PutObjectAcl",
"Principal": {
"AWS": [
"arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
"arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
]
},
"Sid": "Test",
"Resource": "arn:aws:s3:::test_policy/*",
"Effect": "Allow"
}
]
}
# Different than larger_policy_two: a different principal is given
larger_policy_three = {
"Version": "2012-10-17",
"Statement": [
{
"Principal": {
"AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
},
"Effect": "Allow",
"Resource": "arn:aws:s3:::test_policy/*",
"Action": [
"s3:PutObject",
"s3:PutObjectAcl"]
},
{
"Action": "s3:PutObjectAcl",
"Principal": {
"AWS": [
"arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
"arn:aws:iam::XXXXXXXXXXXX:user/testuser3"
]
},
"Sid": "Test",
"Resource": "arn:aws:s3:::test_policy/*",
"Effect": "Allow"
}
]
}
def test_compare_small_policies_without_differences():
""" Testing two small policies which are identical except for:
* The contents of the statement are in different orders
* The second policy contains a list of length one whereas in the first it is a string
"""
assert compare_policies(small_policy_one, small_policy_two) is False
def test_compare_large_policies_without_differences():
""" Testing two larger policies which are identical except for:
* The statements are in different orders
* The contents of the statements are also in different orders
* The second contains a list of length one for the Principal whereas in the first it is a string
"""
assert compare_policies(larger_policy_one, larger_policy_two) is False
def test_compare_larger_policies_with_difference():
""" Testing two larger policies which are identical except for:
* one different principal
"""
assert compare_policies(larger_policy_two, larger_policy_three)
def test_compare_smaller_policy_with_larger():
""" Testing two policies of different sizes """
assert compare_policies(larger_policy_one, small_policy_one)
| gpl-3.0 | 5,215,631,560,814,287,000 | 31.682927 | 123 | 0.546828 | false |
lcpt/xc | python_modules/rough_calculations/ng_min_dim_of_abutment_support.py | 1 | 3626 | # -*- coding: utf-8 -*-
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2016, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
import sys
def getLg(soilClass):
'''
    For distances greater than the distance "lg" the soil movement
    can be considered as completely uncorrelated.
'''
retval= 300
if(soilClass == "A"):
retval= 600
elif(soilClass == "B"):
retval= 500
elif(soilClass == "C"):
retval= 400
elif(soilClass == "D"):
retval= 300
elif(soilClass == "E"):
retval= 500
else:
sys.stderr.write("Unknown soil type: "+soilClass)
return retval
def getUgd(soilClass, quakeZone,bridgeClass):
'''
Returns the design value for soil displacement.
soilClass: A, B, C, D or E.
    quakeZone: ZI, Z2, Z3a or Z3b
bridgeClass: COI, COII, COIII
'''
retval= 17e-2
if(soilClass == "A"):
if(quakeZone == "ZI"):
retval= 2e-2
elif(quakeZone == "Z2"):
retval= 4e-2
elif(quakeZone == "Z3a"):
retval= 5e-2
elif(quakeZone == "Z3b"):
retval= 6e-2
else:
sys.stderr.write("Unknown quake zone: "+quakeZone)
elif(soilClass == "B"):
if(quakeZone == "ZI"):
retval= 4e-2
elif(quakeZone == "Z2"):
retval= 6e-2
elif(quakeZone == "Z3a"):
retval= 8e-2
elif(quakeZone == "Z3b"):
retval= 10e-2
else:
sys.stderr.write("Unknown quake zone: "+quakeZone)
elif(soilClass == "C"):
if(quakeZone == "ZI"):
retval= 5e-2
elif(quakeZone == "Z2"):
retval= 7e-2
elif(quakeZone == "Z3a"):
retval= 9e-2
elif(quakeZone == "Z3b"):
retval= 11e-2
else:
sys.stderr.write("Unknown quake zone: "+quakeZone)
elif(soilClass == "D"):
if(quakeZone == "ZI"):
retval= 6e-2
elif(quakeZone == "Z2"):
retval= 11e-2
elif(quakeZone == "Z3a"):
retval= 14e-2
elif(quakeZone == "Z3b"):
retval= 17e-2
else:
sys.stderr.write("Unknown quake zone: "+quakeZone)
elif(soilClass == "E"):
if(quakeZone == "ZI"):
retval= 4e-2
elif(quakeZone == "Z2"):
retval= 7e-2
elif(quakeZone == "Z3a"):
retval= 9e-2
elif(quakeZone == "Z3b"):
retval= 11e-2
else:
sys.stderr.write("Unknown quake zone: "+quakeZone)
else:
sys.stderr.write("Unknown soil type: "+soilClass)
if(bridgeClass == "COII"):
retval*=1.2
elif(bridgeClass == "COIII"):
retval*=1.4
return retval
def getBminPontFlotant(dAbutFixedPoint,soilClass,quakeZone,bridgeClass):
'''
Returns the minimal dimension of abutment support to avoid
    the risk of the bridge deck falling during an earthquake. See "Évaluation
    parasismique des ponts-routes existants", Office fédéral des routes,
    page 48.
dAbutFixedPoint: Distance between the abutment and the fixed point.
soilClass: A, B, C, D or E.
    quakeZone: ZI, Z2, Z3a or Z3b
bridgeClass: COI, COII, COIII
'''
lg= getLg(soilClass)
ugd= getUgd(soilClass, quakeZone,bridgeClass)
return 0.2+min((1.3+2*dAbutFixedPoint/lg),3.3)*ugd
def getBminPontAppuiFixe(l,a,soilClass,quakeZone,bridgeClass):
'''
Returns the minimal dimension of abutment support to avoid
    the risk of the bridge deck falling during an earthquake. See "Évaluation
    parasismique des ponts-routes existants", Office fédéral des routes,
    page 49.
l: Deck length. (Distance between free and fixed abutments).
a: expansion joint gap
soilClass: A, B, C, D or E.
    quakeZone: ZI, Z2, Z3a or Z3b
bridgeClass: COI, COII, COIII
'''
lg= getLg(soilClass)
ugd= getUgd(soilClass, quakeZone,bridgeClass)
return 0.2+a+min((2*l/lg),2)*ugd
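# --- Illustrative usage (added sketch, not part of the original module) ---
# The numbers below are hypothetical (60 m deck, 5 cm joint gap, class C soil,
# zone Z3a, bridge class COII); they only illustrate the call signatures.
def _demoMinimalSupportDimensions():
    bFlotant= getBminPontFlotant(dAbutFixedPoint= 60.0,soilClass= "C",quakeZone= "Z3a",bridgeClass= "COII")
    bAppuiFixe= getBminPontAppuiFixe(l= 60.0,a= 0.05,soilClass= "C",quakeZone= "Z3a",bridgeClass= "COII")
    return (bFlotant,bAppuiFixe)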
| gpl-3.0 | 3,494,999,877,042,624,000 | 26.431818 | 72 | 0.628832 | false |
satra/prov | prov/model/test/testModel.py | 1 | 5770 | '''
Created on Jan 25, 2012
@author: Trung Dong Huynh
'''
import unittest
from prov.model import ProvBundle, ProvRecord, ProvExceptionCannotUnifyAttribute
import logging
import json
import examples
import os
logger = logging.getLogger(__name__)
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testAllExamples(self):
num_graphs = len(examples.tests)
logger.info('Testing %d provenance graphs' % num_graphs)
counter = 0
for name, graph in examples.tests:
counter += 1
logger.info('%d. Testing the %s example' % (counter, name))
g1 = graph()
logger.debug('Original graph in PROV-N\n%s' % g1.get_provn())
json_str = g1.get_provjson(indent=4)
logger.debug('Original graph in PROV-JSON\n%s' % json_str)
g2 = ProvBundle.from_provjson(json_str)
logger.debug('Graph decoded from PROV-JSON\n%s' % g2.get_provn())
self.assertEqual(g1, g2, 'Round-trip JSON encoding/decoding failed: %s.' % name)
class TestLoadingProvToolboxJSON(unittest.TestCase):
def testLoadAllJSON(self):
json_path = os.path.dirname(os.path.abspath(__file__)) + '/json/'
filenames = os.listdir(json_path)
fails = []
for filename in filenames:
if filename.endswith('.json'):
with open(json_path + filename) as json_file:
try:
g1 = json.load(json_file, cls=ProvBundle.JSONDecoder)
json_str = g1.get_provjson(indent=4)
g2 = ProvBundle.from_provjson(json_str)
self.assertEqual(g1, g2, 'Round-trip JSON encoding/decoding failed: %s.' % filename)
except:
fails.append(filename)
self.assertFalse(fails, 'Failed to load %d JSON files (%s)' % (len(fails), ', '.join(fails)))
# Code for debugging the failed tests
# for filename in fails:
# os.rename(json_path + filename, json_path + filename + '-fail')
# with open(json_path + filename) as json_file:
# json.load(json_file, cls=ProvBundle.JSONDecoder)
class TestFlattening(unittest.TestCase):
def test1(self):
target = ProvBundle()
target.activity('ex:correct', '2012-03-31T09:21:00', '2012-04-01T15:21:00')
result = ProvBundle()
result.activity('ex:correct', '2012-03-31T09:21:00')
result_inner = ProvBundle(identifier="ex:bundle1")
result_inner.activity('ex:correct', None, '2012-04-01T15:21:00')
result.add_bundle(result_inner)
self.assertEqual(result.get_flattened(), target)
def test2(self):
target = ProvBundle()
target.activity('ex:compose', other_attributes=(('prov:role', "ex:dataToCompose1"), ('prov:role', "ex:dataToCompose2")))
result = ProvBundle()
result.activity('ex:compose', other_attributes={'prov:role': "ex:dataToCompose1"})
result_inner = ProvBundle(identifier="ex:bundle1")
result_inner.activity('ex:compose', other_attributes={'prov:role': "ex:dataToCompose2"})
result.add_bundle(result_inner)
self.assertEqual(result.get_flattened(), target)
def test3(self):
target = ProvBundle()
target.activity('ex:compose', other_attributes=(('prov:role', "ex:dataToCompose1"), ('prov:role', "ex:dataToCompose2")))
result = ProvBundle()
result.activity('ex:compose', other_attributes={'prov:role': "ex:dataToCompose1"})
result_inner = ProvBundle(identifier="ex:bundle1")
result_inner.activity('ex:compose', other_attributes=(('prov:role', "ex:dataToCompose1"), ('prov:role', "ex:dataToCompose2")))
result.add_bundle(result_inner)
self.assertEqual(result.get_flattened(), target)
def test_references_in_flattened_documents(self):
bundle = examples.bundles1()
flattened = bundle.get_flattened()
records = set(flattened._records)
for record in records:
for attr_value in (record._attributes or {}).values():
if attr_value and isinstance(attr_value, ProvRecord):
self.assertIn(attr_value, records, 'Document does not contain the record %s with id %i (related to %s)' % (attr_value, id(attr_value), record))
def test_inferred_retyping_in_flattened_documents(self):
g = ProvBundle()
g.add_namespace("ex", "http://www.example.com/")
g.wasGeneratedBy('ex:Bob', time='2012-05-25T11:15:00')
b1 = g.bundle('ex:bundle')
b1.agent('ex:Bob')
h = ProvBundle()
h.add_namespace("ex", "http://www.example.com/")
h.agent('ex:Bob')
h.wasGeneratedBy('ex:Bob', time='2012-05-25T11:15:00')
self.assertEqual(g.get_flattened(), h)
def test_non_unifiable_document(self):
g = ProvBundle()
g.add_namespace("ex", "http://www.example.com/")
g.activity('ex:compose', other_attributes={'prov:role': "ex:dataToCompose1"})
g.used('ex:compose', 'ex:testEntity')
with self.assertRaises(ProvExceptionCannotUnifyAttribute):
g.activity('ex:testEntity')
h = g.bundle('ex:bundle')
h.add_namespace("ex", "http://www.example.com/")
h.entity('ex:compose', other_attributes={'prov:label': "impossible!!!"})
with self.assertRaises(ProvExceptionCannotUnifyAttribute):
g.get_flattened()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
| mit | 8,338,226,368,946,939,000 | 39.510791 | 163 | 0.597054 | false |
ky822/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause | -6,674,437,607,021,148,000 | 26.547945 | 76 | 0.668324 | false |
kickstandproject/ripcord | ripcord/cmd/manage.py | 1 | 2080 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CLI interface for ripcord management.
"""
import logging
from oslo.config import cfg
from ripcord.common import config
from ripcord.db import migration as db_migration
from ripcord.openstack.common import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def do_db_version():
"""Print database's current migration level."""
print(db_migration.db_version())
def do_db_sync():
"""Place a database under migration control and upgrade,
creating first if necessary.
"""
db_migration.db_sync(CONF.command.version)
def add_command_parsers(subparsers):
parser = subparsers.add_parser('db-version')
parser.set_defaults(func=do_db_version)
parser = subparsers.add_parser('db-sync')
parser.set_defaults(func=do_db_sync)
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
parser.add_argument(
'-g', '--granularity', default='days',
choices=['days', 'hours', 'minutes', 'seconds'],
help='Granularity to use for age argument, defaults to days.')
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Available commands',
handler=add_command_parsers)
def main():
CONF.register_cli_opt(command_opt)
config.parse_args()
log.setup('ripcord')
CONF.log_opt_values(LOG, logging.INFO)
CONF.command.func()
| apache-2.0 | 5,014,066,116,016,644,000 | 28.295775 | 74 | 0.677404 | false |
jazcollins/models | im2txt/im2txt/inference_utils/caption_generator_test.py | 33 | 5787 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for CaptionGenerator."""
import math
import numpy as np
import tensorflow as tf
from im2txt.inference_utils import caption_generator
class FakeVocab(object):
"""Fake Vocabulary for testing purposes."""
def __init__(self):
self.start_id = 0 # Word id denoting sentence start.
self.end_id = 1 # Word id denoting sentence end.
class FakeModel(object):
"""Fake model for testing purposes."""
def __init__(self):
# Number of words in the vocab.
self._vocab_size = 12
# Dimensionality of the nominal model state.
self._state_size = 1
# Map of previous word to the probability distribution of the next word.
self._probabilities = {
0: {1: 0.1,
2: 0.2,
3: 0.3,
4: 0.4},
2: {5: 0.1,
6: 0.9},
3: {1: 0.1,
7: 0.4,
8: 0.5},
4: {1: 0.3,
9: 0.3,
10: 0.4},
5: {1: 1.0},
6: {1: 1.0},
7: {1: 1.0},
8: {1: 1.0},
9: {1: 0.5,
11: 0.5},
10: {1: 1.0},
11: {1: 1.0},
}
# pylint: disable=unused-argument
def feed_image(self, sess, encoded_image):
# Return a nominal model state.
return np.zeros([1, self._state_size])
def inference_step(self, sess, input_feed, state_feed):
# Compute the matrix of softmax distributions for the next batch of words.
batch_size = input_feed.shape[0]
softmax_output = np.zeros([batch_size, self._vocab_size])
for batch_index, word_id in enumerate(input_feed):
for next_word, probability in self._probabilities[word_id].items():
softmax_output[batch_index, next_word] = probability
# Nominal state and metadata.
new_state = np.zeros([batch_size, self._state_size])
metadata = None
return softmax_output, new_state, metadata
# pylint: enable=unused-argument
class CaptionGeneratorTest(tf.test.TestCase):
def _assertExpectedCaptions(self,
expected_captions,
beam_size=3,
max_caption_length=20,
length_normalization_factor=0):
"""Tests that beam search generates the expected captions.
Args:
expected_captions: A sequence of pairs (sentence, probability), where
sentence is a list of integer ids and probability is a float in [0, 1].
beam_size: Parameter passed to beam_search().
max_caption_length: Parameter passed to beam_search().
length_normalization_factor: Parameter passed to beam_search().
"""
expected_sentences = [c[0] for c in expected_captions]
expected_probabilities = [c[1] for c in expected_captions]
# Generate captions.
generator = caption_generator.CaptionGenerator(
model=FakeModel(),
vocab=FakeVocab(),
beam_size=beam_size,
max_caption_length=max_caption_length,
length_normalization_factor=length_normalization_factor)
actual_captions = generator.beam_search(sess=None, encoded_image=None)
actual_sentences = [c.sentence for c in actual_captions]
actual_probabilities = [math.exp(c.logprob) for c in actual_captions]
self.assertEqual(expected_sentences, actual_sentences)
self.assertAllClose(expected_probabilities, actual_probabilities)
def testBeamSize(self):
# Beam size = 1.
expected = [([0, 4, 10, 1], 0.16)]
self._assertExpectedCaptions(expected, beam_size=1)
# Beam size = 2.
expected = [([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15)]
self._assertExpectedCaptions(expected, beam_size=2)
# Beam size = 3.
expected = [
([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15)
]
self._assertExpectedCaptions(expected, beam_size=3)
def testMaxLength(self):
# Max length = 1.
expected = [([0], 1.0)]
self._assertExpectedCaptions(expected, max_caption_length=1)
# Max length = 2.
# There are no complete sentences, so partial sentences are returned.
expected = [([0, 4], 0.4), ([0, 3], 0.3), ([0, 2], 0.2)]
self._assertExpectedCaptions(expected, max_caption_length=2)
# Max length = 3.
# There is at least one complete sentence, so only complete sentences are
# returned.
expected = [([0, 4, 1], 0.12), ([0, 3, 1], 0.03)]
self._assertExpectedCaptions(expected, max_caption_length=3)
# Max length = 4.
expected = [
([0, 2, 6, 1], 0.18), ([0, 4, 10, 1], 0.16), ([0, 3, 8, 1], 0.15)
]
self._assertExpectedCaptions(expected, max_caption_length=4)
def testLengthNormalization(self):
# Length normalization factor = 3.
# The longest caption is returned first, despite having low probability,
# because it has the highest log(probability)/length**3.
expected = [
([0, 4, 9, 11, 1], 0.06),
([0, 2, 6, 1], 0.18),
([0, 4, 10, 1], 0.16),
([0, 3, 8, 1], 0.15),
]
self._assertExpectedCaptions(
expected, beam_size=4, length_normalization_factor=3)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 7,406,330,245,788,394,000 | 31.511236 | 80 | 0.607569 | false |
hsiegel/postsai-commitstop | permissions/configDb.py | 1 | 3248 | # The MIT License (MIT)
# Copyright (c) 2016-2017 HIS e. G.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from backend.db import PostsaiDB
from response import ret200
import config
def fetchLatestConfig():
""" returns the currently active configuration """
rows = fetchConfigs(1)
if len(rows) < 1:
return "- .* .* .* .* Cannot fetch config from database"
latestConfig = rows[0]
# return mock()
return latestConfig[0]
def fetchConfigs(maximum):
""" returns the $maximum configurations that were recently active """
db = PostsaiDB(vars(config))
db.connect()
m = max(0, int(maximum))
sql = "SELECT configtext, username, changecomment, changetime FROM repository_status ORDER BY changetime DESC LIMIT %s"
rows = db.query(sql, [m])
db.disconnect()
return rows
def writeConfigToDB(data):
""" stores a configuration in the database and makes it the active configuration """
db = PostsaiDB(vars(config))
db.connect()
sql = "INSERT INTO repository_status (`configtext`, `username`, `changecomment`, `changetime`) VALUES (%s, %s, %s, NOW());"
db.query(sql, data, cursor_type=None)
db.disconnect()
ret200("stored")
def mock():
""" mock """
return """\
# ---------------------------------------------------------------------------------------------------
# Repository Branch Benutzer Gruppe Meldung
# Folgende Repos sollen nicht vom Commit-Stop betroffen sein, obwohl sie dem H1-Namensschema entsprechen
+ cs.sys.externalapps.browser .* .* .* bla blubb oink honk
# Temporaere Ausnahme fuer Benutzer abc auf Repository webapps Version 2017.06
+ webapps VERSION_2017_06 abc .*
# Commits nach 2016.06 auf HISinOne-Repositories verbieten
- cs.*|cm.*|rt.*|rm.*|webapps VERSION_2017_06 .* .* |<| Geplanter Commit-Stop bis zum 10.11.2016
# Wenn bisher kein Regel gegriffen hat, Zugriff erlauben (Die normaler Zugriffsrechte wurden bereits im Vorfeld geprueft)
+ .* .* .* .*
"""
| mit | -1,189,038,373,649,532,700 | 40.641026 | 133 | 0.640702 | false |
NetDBNCKU/GAE-Conference-Web-App | django/contrib/auth/tests/management.py | 83 | 2529 | from StringIO import StringIO
from django.contrib.auth import models, management
from django.contrib.auth.management.commands import changepassword
from django.test import TestCase
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self._getpass_getuser = management.get_system_username
def tearDown(self):
management.get_system_username = self._getpass_getuser
def test_simple(self):
management.get_system_username = lambda: u'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
models.User.objects.create(username='joe')
management.get_system_username = lambda: u'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: u'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create_user(username='joe', password='qwerty')
self.stdout = StringIO()
self.stderr = StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
" Executing the changepassword management command should change joe's password "
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute("joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEquals(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
A CommandError should be thrown by handle() if the user enters in
mismatched passwords three times. This should be caught by execute() and
converted to a SystemExit
"""
command = changepassword.Command()
command._get_pass = lambda *args: args or 'foo'
self.assertRaises(
SystemExit,
command.execute,
"joe",
stdout=self.stdout,
stderr=self.stderr
)
| bsd-3-clause | 7,477,188,254,127,748,000 | 34.619718 | 123 | 0.66469 | false |
rsmoorthy/docker | tally/typekeys.py | 1 | 2917 | # Type keys and specify shift key up/down
import subprocess
import sys
import argparse
import time
import os
class TypeKeys:
def __init__(self, *args, **kwargs):
self.shift = False
self.name = 'Tally.ERP 9'
self.window = 0
if 'WID' in os.environ:
self.window = os.environ['WID']
if 'WINDOWID' in os.environ:
self.window = os.environ['WINDOWID']
self.chars = {}
for x in range(ord('A'), ord('Z')+1):
self.chars[chr(x)] = True
for x in range(ord('a'), ord('z')+1):
self.chars[chr(x)] = False
for x in [' ', ',', '.', '/', ';', "'", '[', ']', '`', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=', '\\']:
self.chars[x] = False
for x in ['<', '>', '?', ':', '"', '{', '}', '~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', '|']:
self.chars[x] = True
self.keys = ["BackSpace", "Escape", "Return", "Down", "Up", "Left", "Right"]
def init(self):
if not self.window:
self.window = self.runxdo(["xdotool", "search", "--name", "%s" % (self.name)])
self.stop_shift()
def runxdo(self, cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out
def start_shift(self):
if self.shift == True:
return
self.runxdo(["xdotool", "keydown", "--window", "%s" % (self.window), "Shift"])
self.shift = True
def stop_shift(self):
if self.shift == False:
return
self.runxdo(["xdotool", "keyup", "--window", "%s" % (self.window), "Shift"])
self.shift = False
def type(self, str):
if str in self.keys:
self.runxdo(["xdotool", "key", "--delay", "%s" % (self.delay), "--window", "%s" % (self.window), "%s" % (str)])
return
for x in list(str):
if self.chars[x]:
self.start_shift()
else:
self.stop_shift()
self.runxdo(["xdotool", "type", "--delay", "%s" % (self.delay), "--window", "%s" % (self.window), "%s" % (x)])
self.stop_shift()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("string", help="string to type")
parser.add_argument("--ender", help="Key to press at the end", default=None)
parser.add_argument("--delay", help="delay between characters", default=1)
parser.add_argument("--window", help="window id")
parser.add_argument("--sleep", type=float, help="sleep time after commands", default=0.1)
args = parser.parse_args()
tk = TypeKeys()
if args.delay:
tk.delay = args.delay
if args.window:
tk.window = args.window
tk.init()
tk.type(args.string)
if(args.ender):
tk.type(args.ender)
time.sleep(args.sleep)
| mit | -6,232,246,684,279,853,000 | 34.573171 | 129 | 0.502571 | false |
yangdw/repo.python | src/annotation/Firefly/firefly/dbentrust/util.py | 8 | 6754 | #coding:utf8
'''
Created on 2013-5-8
@author: lan (www.9miao.com)
'''
from dbpool import dbpool
from MySQLdb.cursors import DictCursor
from numbers import Number
from twisted.python import log
def forEachPlusInsertProps(tablename,props):
assert type(props) == dict
pkeysstr = str(tuple(props.keys())).replace('\'','`')
pvaluesstr = ["%s,"%val if isinstance(val,Number) else
"'%s',"%str(val).replace("'", "\\'") for val in props.values()]
pvaluesstr = ''.join(pvaluesstr)[:-1]
sqlstr = """INSERT INTO `%s` %s values (%s);"""%(tablename,pkeysstr,pvaluesstr)
return sqlstr
def FormatCondition(props):
"""生成查询条件字符串
"""
items = props.items()
itemstrlist = []
for _item in items:
if isinstance(_item[1],Number):
sqlstr = " `%s`=%s AND"%_item
else:
sqlstr = " `%s`='%s' AND "%(_item[0],str(_item[1]).replace("'", "\\'"))
itemstrlist.append(sqlstr)
sqlstr = ''.join(itemstrlist)
return sqlstr[:-4]
def FormatUpdateStr(props):
"""生成更新语句
"""
items = props.items()
itemstrlist = []
for _item in items:
if isinstance(_item[1],Number):
sqlstr = " `%s`=%s,"%_item
else:
sqlstr = " `%s`='%s',"%(_item[0],str(_item[1]).replace("'", "\\'"))
itemstrlist.append(sqlstr)
sqlstr = ''.join(itemstrlist)
return sqlstr[:-1]
def forEachUpdateProps(tablename,props,prere):
    '''Iterate over the properties to modify and build the UPDATE SQL statement.'''
assert type(props) == dict
pro = FormatUpdateStr(props)
pre = FormatCondition(prere)
sqlstr = """UPDATE `%s` SET %s WHERE %s;"""%(tablename,pro,pre)
return sqlstr
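# --- Illustrative usage (added sketch, not part of the original module) ---
# The table and column names below are hypothetical; the calls only show how
# the INSERT/UPDATE SQL builders above fit together.
def _demo_sql_builders():
    insert_sql = forEachPlusInsertProps('tb_character', {'id': 1, 'name': 'lan'})
    update_sql = forEachUpdateProps('tb_character', {'name': 'miao'}, {'id': 1})
    return insert_sql, update_sql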
def EachQueryProps(props):
    '''Iterate over the field list and build the column list of a SQL statement.
'''
sqlstr = ""
if props == '*':
return '*'
elif type(props) == type([0]):
for prop in props:
sqlstr = sqlstr + prop +','
sqlstr = sqlstr[:-1]
return sqlstr
else:
raise Exception('props to query must be dict')
return
def forEachQueryProps(sqlstr, props):
    '''Iterate over the properties to query and append them to the SQL string.'''
if props == '*':
sqlstr += ' *'
elif type(props) == type([0]):
i = 0
for prop in props:
if(i == 0):
sqlstr += ' ' + prop
else:
sqlstr += ', ' + prop
i += 1
else:
raise Exception('props to query must be list')
return
return sqlstr
def GetTableIncrValue(tablename):
"""
"""
database = dbpool.config.get('db')
sql = """SELECT AUTO_INCREMENT FROM information_schema.`TABLES` \
WHERE TABLE_SCHEMA='%s' AND TABLE_NAME='%s';"""%(database,tablename)
conn = dbpool.connection()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchone()
cursor.close()
conn.close()
if result:
return result[0]
return result
def ReadDataFromDB(tablename):
"""
"""
sql = """select * from %s"""%tablename
conn = dbpool.connection()
cursor = conn.cursor(cursorclass = DictCursor)
cursor.execute(sql)
result=cursor.fetchall()
cursor.close()
conn.close()
return result
def DeleteFromDB(tablename,props):
    '''Delete rows matching `props` from `tablename`.
'''
prers = FormatCondition(props)
sql = """DELETE FROM %s WHERE %s ;"""%(tablename,prers)
conn = dbpool.connection()
cursor = conn.cursor()
count = 0
try:
count = cursor.execute(sql)
conn.commit()
except Exception,e:
log.err(e)
log.err(sql)
cursor.close()
conn.close()
return bool(count)
def InsertIntoDB(tablename,data):
"""写入数据库
"""
sql = forEachPlusInsertProps(tablename,data)
conn = dbpool.connection()
cursor = conn.cursor()
count = 0
try:
count = cursor.execute(sql)
conn.commit()
except Exception,e:
log.err(e)
log.err(sql)
cursor.close()
conn.close()
return bool(count)
def UpdateWithDict(tablename,props,prere):
"""更新记录
"""
sql = forEachUpdateProps(tablename, props, prere)
conn = dbpool.connection()
cursor = conn.cursor()
count = 0
try:
count = cursor.execute(sql)
conn.commit()
except Exception,e:
log.err(e)
log.err(sql)
cursor.close()
conn.close()
if(count >= 1):
return True
return False
def getAllPkByFkInDB(tablename,pkname,props):
"""根据所有的外键获取主键ID
"""
props = FormatCondition(props)
sql = """Select `%s` from `%s` where %s"""%(pkname,tablename,props)
conn = dbpool.connection()
cursor = conn.cursor()
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
return [key[0] for key in result]
def GetOneRecordInfo(tablename,props):
    '''Fetch a single record matching `props` as a dict.
'''
props = FormatCondition(props)
sql = """Select * from `%s` where %s"""%(tablename,props)
conn = dbpool.connection()
cursor = conn.cursor(cursorclass = DictCursor)
cursor.execute(sql)
result = cursor.fetchone()
cursor.close()
conn.close()
return result
def GetRecordList(tablename,pkname,pklist):
"""
"""
pkliststr = ""
for pkid in pklist:
pkliststr+="%s,"%pkid
pkliststr = "(%s)"%pkliststr[:-1]
sql = """SELECT * FROM `%s` WHERE `%s` IN %s;"""%(tablename,pkname,pkliststr)
conn = dbpool.connection()
cursor = conn.cursor(cursorclass = DictCursor)
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
return result
def DBTest():
sql = """SELECT * FROM tb_item WHERE characterId=1000001;"""
conn = dbpool.connection()
cursor = conn.cursor(cursorclass = DictCursor)
cursor.execute(sql)
result=cursor.fetchall()
cursor.close()
conn.close()
return result
def getallkeys(key,mem):
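    # Scans memcached: reads 'stats items' to find slab ids, dumps each slab with
    # 'stats cachedump', and returns the set of primary keys (third ':' segment)
    # of cached keys that start with `key`, skipping pks that start with '_'.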
itemsinfo = mem.get_stats('items')
itemindex = []
for items in itemsinfo:
itemindex += [ _key.split(':')[1] for _key in items[1].keys()]
s = set(itemindex)
itemss = [mem.get_stats('cachedump %s 0'%i) for i in s]
allkeys = set([])
for item in itemss:
for _item in item:
nowlist = set([])
for _key in _item[1].keys():
try:
keysplit = _key.split(':')
pk = keysplit[2]
except:
continue
if _key.startswith(key) and not pk.startswith('_'):
nowlist.add(pk)
allkeys = allkeys.union(nowlist)
return allkeys
def getAllPkByFkInMEM(key,fk,mem):
pass
| mit | -1,960,004,071,857,271,600 | 25.401606 | 83 | 0.570277 | false |
LinuxChristian/home-assistant | homeassistant/components/climate/eq3btsmart.py | 10 | 4870 | """
Support for eQ-3 Bluetooth Smart thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.eq3btsmart/
"""
import logging
import voluptuous as vol
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, PRECISION_HALVES,
STATE_AUTO, STATE_ON, STATE_OFF,
)
from homeassistant.const import (
CONF_MAC, TEMP_CELSIUS, CONF_DEVICES, ATTR_TEMPERATURE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-eq3bt==0.1.5']
_LOGGER = logging.getLogger(__name__)
STATE_BOOST = 'boost'
STATE_AWAY = 'away'
STATE_MANUAL = 'manual'
ATTR_STATE_WINDOW_OPEN = 'window_open'
ATTR_STATE_VALVE = 'valve'
ATTR_STATE_LOCKED = 'is_locked'
ATTR_STATE_LOW_BAT = 'low_battery'
ATTR_STATE_AWAY_END = 'away_end'
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_MAC): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICES):
vol.Schema({cv.string: DEVICE_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the eQ-3 BLE thermostats."""
devices = []
for name, device_cfg in config[CONF_DEVICES].items():
mac = device_cfg[CONF_MAC]
devices.append(EQ3BTSmartThermostat(mac, name))
add_devices(devices)
# pylint: disable=import-error
class EQ3BTSmartThermostat(ClimateDevice):
"""Representation of a eQ-3 Bluetooth Smart thermostat."""
def __init__(self, _mac, _name):
"""Initialize the thermostat."""
# we want to avoid name clash with this module..
import eq3bt as eq3
self.modes = {eq3.Mode.Open: STATE_ON,
eq3.Mode.Closed: STATE_OFF,
eq3.Mode.Auto: STATE_AUTO,
eq3.Mode.Manual: STATE_MANUAL,
eq3.Mode.Boost: STATE_BOOST,
eq3.Mode.Away: STATE_AWAY}
self.reverse_modes = {v: k for k, v in self.modes.items()}
self._name = _name
self._thermostat = eq3.Thermostat(_mac)
@property
def available(self) -> bool:
"""Return if thermostat is available."""
return self.current_operation is not None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def precision(self):
"""Return eq3bt's precision 0.5."""
return PRECISION_HALVES
@property
def current_temperature(self):
"""Can not report temperature, so return target_temperature."""
return self.target_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._thermostat.target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._thermostat.target_temperature = temperature
@property
def current_operation(self):
"""Return the current operation mode."""
if self._thermostat.mode < 0:
return None
return self.modes[self._thermostat.mode]
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [x for x in self.modes.values()]
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
self._thermostat.mode = self.reverse_modes[operation_mode]
def turn_away_mode_off(self):
"""Away mode off turns to AUTO mode."""
self.set_operation_mode(STATE_AUTO)
def turn_away_mode_on(self):
"""Set away mode on."""
self.set_operation_mode(STATE_AWAY)
@property
def is_away_mode_on(self):
"""Return if we are away."""
return self.current_operation == STATE_AWAY
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._thermostat.min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._thermostat.max_temp
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
dev_specific = {
ATTR_STATE_LOCKED: self._thermostat.locked,
ATTR_STATE_LOW_BAT: self._thermostat.low_battery,
ATTR_STATE_VALVE: self._thermostat.valve_state,
ATTR_STATE_WINDOW_OPEN: self._thermostat.window_open,
ATTR_STATE_AWAY_END: self._thermostat.away_end,
}
return dev_specific
def update(self):
"""Update the data from the thermostat."""
self._thermostat.update()
| apache-2.0 | 7,478,260,268,917,543,000 | 28.161677 | 74 | 0.631211 | false |
dronly/python | shiyanlou/markup/markup.py | 1 | 2132 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys,re
from handlers import *
from util import *
from rules import *
import logging
logging.basicConfig(level=logging.INFO)
class Parser:
"""
    Base class for parsers.
"""
def __init__(self, handler):
        self.handler = handler  # handler object that renders the output
        self.rules = []  # rule objects used to classify text blocks
        self.filters = []  # regex filter functions (applied via re.sub) for URLs, emails, etc.
def addRule(self, rule):
"""
        Add a rule.
"""
self.rules.append(rule)
def addFilter(self, pattern, name):
"""
        Add a filter.
"""
def filter(block, handler):
return re.sub(pattern, handler.sub(name), block)
self.filters.append(filter)
def parse(self, file):
"""
        Parse the input file.
"""
#print(file) ---> <_io.TextIOWrapper name='<stdin>' mode='r' encoding='UTF-8'>
self.handler.start('document')
        for block in blocks(file): # loop over the text blocks
            for filter in self.filters: # apply each filter to the block
block = filter(block, self.handler)
for rule in self.rules:
if rule.condition(block):
last = rule.action(block, self.handler)
if last:
break
self.handler.end('document')
class BasicTextParser(Parser):
"""
    Plain-text parser.
"""
def __init__(self, handler):
Parser.__init__(self, handler)
self.addRule(ListRule())
self.addRule(ListItemRule())
self.addRule(TitleRule())
self.addRule(HeadingRule())
self.addRule(ParagraphRule())
        self.addFilter(r'\*(.+?)\*', 'emphasis') # emphasized text between two * characters
        self.addFilter(r'(http://[\.a-zA-Z/]+)', 'url') # regex that extracts URLs
        self.addFilter(r'([\.a-zA-Z]+@[\.a-zA-Z]+[a-zA-Z]+)', 'mail') # regex that extracts email addresses
"""
Run the program.
"""
handler = HTMLRenderer() # initialize the handler
parser = BasicTextParser(handler) # initialize the plain-text parser
parser.parse(sys.stdin) # run the parser on standard input
| apache-2.0 | 7,399,155,772,781,721,000 | 25.694444 | 87 | 0.542144 | false |
xouillet/sigal | tests/test_zip.py | 1 | 1645 | # -*- coding:utf-8 -*-
import os
import glob
import zipfile
from sigal.gallery import Gallery
from sigal.settings import read_settings
CURRENT_DIR = os.path.dirname(__file__)
SAMPLE_DIR = os.path.join(CURRENT_DIR, 'sample')
SAMPLE_SOURCE = os.path.join(SAMPLE_DIR, 'pictures', 'dir1')
def make_gallery(**kwargs):
default_conf = os.path.join(SAMPLE_DIR, 'sigal.conf.py')
settings = read_settings(default_conf)
settings['source'] = SAMPLE_SOURCE
settings.update(kwargs)
return Gallery(settings, ncpu=1)
def test_zipped_correctly(tmpdir):
outpath = str(tmpdir)
gallery = make_gallery(destination=outpath,
zip_gallery='archive.zip')
gallery.build()
zipped1 = glob.glob(os.path.join(outpath, 'test1', '*.zip'))
assert len(zipped1) == 1
assert os.path.basename(zipped1[0]) == 'archive.zip'
zip_file = zipfile.ZipFile(zipped1[0], 'r')
expected = ('11.jpg', 'archlinux-kiss-1024x640.png',
'flickr_jerquiaga_2394751088_cc-by-nc.jpg',
'50a1d0bc-763d-457e-b634-c87f16a64270.gif')
for filename in zip_file.namelist():
assert filename in expected
zip_file.close()
zipped2 = glob.glob(os.path.join(outpath, 'test2', '*.zip'))
assert len(zipped2) == 1
assert os.path.basename(zipped2[0]) == 'archive.zip'
def test_no_archive(tmpdir):
outpath = str(tmpdir)
gallery = make_gallery(destination=outpath,
zip_gallery=False)
gallery.build()
assert not glob.glob(os.path.join(outpath, 'test1', '*.zip'))
assert not glob.glob(os.path.join(outpath, 'test2', '*.zip'))
| mit | -4,184,395,364,319,796,000 | 28.909091 | 65 | 0.644985 | false |
dulems/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_checksums.py | 246 | 1098 | import unittest
from django.utils import checksums
class TestUtilsChecksums(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_luhn(self):
f = checksums.luhn
items = (
(4111111111111111, True), ('4111111111111111', True),
(4222222222222, True), (378734493671000, True),
(5424000000000015, True), (5555555555554444, True),
(1008, True), ('0000001008', True), ('000000001008', True),
(4012888888881881, True), (1234567890123456789012345678909, True),
(4111111111211111, False), (42222222222224, False),
(100, False), ('100', False), ('0000100', False),
('abc', False), (None, False), (object(), False),
)
for value, output in items:
self.check_output(f, value, output)
| apache-2.0 | -3,065,785,802,145,620,500 | 36.862069 | 78 | 0.592896 | false |
mbiciunas/nix | test/cli_config/tag/test_tag_delete.py | 1 | 1334 | import pytest
from cli_config.tag import tag
from utility.nix_error import NixError
def test_tag_delete_no_tag(capsys):
with pytest.raises(SystemExit) as _excinfo:
tag.tag("nixconfig", ["delete"])
_out, _err = capsys.readouterr()
assert "2" in str(_excinfo.value), "Exception doesn't contain expected string"
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
assert "the following arguments are required: tag" in _err, "StdErr doesn't contain expected string"
def test_tag_delete_invalid_tag(capsys):
with pytest.raises(NixError) as _excinfo:
tag.tag("nixconfig", ["delete", "badtag"])
_out, _err = capsys.readouterr()
assert "Unknown tag: badtag" in str(_excinfo.value)
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_tag_delete_in_use_tag(capsys):
with pytest.raises(NixError) as _excinfo:
tag.tag("nixconfig", ["delete", "tag1"])
_out, _err = capsys.readouterr()
assert "Unable to delete tag: tag1 while attached to scripts" in str(_excinfo.value)
assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
assert len(_err) is 0, "StdErr should be empty, contains: {}".format(_err)
| gpl-3.0 | 4,161,962,923,103,819,300 | 35.054054 | 104 | 0.668666 | false |
40223101/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/pprint.py | 634 | 12757 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
from collections import OrderedDict as _OrderedDict
from io import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
The wrapped-object will fallback to an Py2.x style comparison for
unorderable types (sorting first comparing the type name and then by
the obj ids). Does not work recursively, so dict.items() must have
_safe_key applied to both the key and the value.
"""
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
rv = self.obj.__lt__(other.obj)
except TypeError:
rv = NotImplemented
if rv is NotImplemented:
rv = (str(type(self.obj)), id(self.obj)) < \
(str(type(other.obj)), id(other.obj))
return rv
def _safe_tuple(t):
"Helper function for comparing 2-tuples"
return _safe_key(t[0]), _safe_key(t[1])
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
if sepLines:
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict):
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
if issubclass(typ, _OrderedDict):
items = list(object.items())
else:
items = sorted(object.items(), key=_safe_tuple)
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, tuple):
write('(')
endchar = ')'
else:
if not length:
write(rep)
return
if typ is set:
write('{')
endchar = '}'
else:
write(typ.__name__)
write('({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
write(',\n' + ' '*indent)
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=_safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print("_safe_repr:", t2 - t1)
print("pformat:", t3 - t2)
if __name__ == "__main__":
_perfcheck()
| gpl-3.0 | -263,709,992,797,487,000 | 33.478378 | 81 | 0.523791 | false |
blacktear23/django | django/contrib/humanize/templatetags/humanize.py | 274 | 3396 | from django.utils.translation import ungettext, ugettext as _
from django.utils.encoding import force_unicode
from django import template
from django.template import defaultfilters
from datetime import date
import re
register = template.Library()
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
t = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return u"%d%s" % (value, t[0])
return u'%d%s' % (value, t[value % 10])
ordinal.is_safe = True
register.filter(ordinal)
def intcomma(value):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
orig = force_unicode(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
intcomma.is_safe = True
register.filter(intcomma)
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
if value < 1000000000:
new_value = value / 1000000.0
return ungettext('%(value).1f million', '%(value).1f million', new_value) % {'value': new_value}
if value < 1000000000000:
new_value = value / 1000000000.0
return ungettext('%(value).1f billion', '%(value).1f billion', new_value) % {'value': new_value}
if value < 1000000000000000:
new_value = value / 1000000000000.0
return ungettext('%(value).1f trillion', '%(value).1f trillion', new_value) % {'value': new_value}
return value
intword.is_safe = False
register.filter(intword)
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'), _('seven'), _('eight'), _('nine'))[value-1]
apnumber.is_safe = True
register.filter(apnumber)
def naturalday(value, arg=None):
"""
For date values that are tomorrow, today or yesterday compared to
present day returns representing string. Otherwise, returns a string
formatted according to settings.DATE_FORMAT.
"""
try:
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
delta = value - date.today()
if delta.days == 0:
return _(u'today')
elif delta.days == 1:
return _(u'tomorrow')
elif delta.days == -1:
return _(u'yesterday')
return defaultfilters.date(value, arg)
register.filter(naturalday)
| bsd-3-clause | 1,788,655,249,878,772,200 | 32.294118 | 119 | 0.619258 | false |
prospwro/odoo | addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 374 | 2830 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return self.pool['report'].get_action(cr, uid, ids,
'l10n_in_hr_payroll.report_hrsalarybymonth',
data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,442,688,239,947,527,700 | 41.878788 | 138 | 0.601767 | false |
OpenCMISS-Bindings/ZincPythonTools | setup.py | 1 | 1174 | """
Zinc Python Tools
A collection of Qt widgets and utilities building on the Python bindings for the OpenCMISS-Zinc Visualisation Library.
"""
classifiers = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: Science/Research
License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Programming Language :: Python
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS :: MacOS X
Topic :: Scientific/Engineering :: Medical Science Apps.
Topic :: Scientific/Engineering :: Visualization
Topic :: Software Development :: Libraries :: Python Modules
"""
from setuptools import setup
doc_lines = __doc__.split("\n")
requires = ['opencmiss.utils']
setup(
name='ZincPythonTools',
version='1.0.0',
author='H. Sorby',
author_email='[email protected]',
packages=['opencmiss', 'opencmiss.zincwidgets'],
platforms=['any'],
url='http://pypi.python.org/pypi/ZincPythonTools/',
license='LICENSE.txt',
description=doc_lines[0],
    classifiers=[c for c in classifiers.split("\n") if c],
install_requires=requires,
)
| mpl-2.0 | -4,252,329,995,703,317,000 | 29.102564 | 118 | 0.717206 | false |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/calliope/exceptions.py | 3 | 14638 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions that can be thrown by calliope tools.
The exceptions in this file, and those that extend them, can be thrown by
the Run() function in calliope tools without worrying about stack traces
littering the screen in CLI mode. In interpreter mode, they are not caught
from within calliope.
"""
from functools import wraps
import os
import sys
from googlecloudsdk.api_lib.util import exceptions as api_exceptions
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_attr_os
def NewErrorFromCurrentException(error, *args):
"""Creates a new error based on the current exception being handled.
If no exception is being handled, a new error with the given args
is created. If there is a current exception, the original exception is
first logged (to file only). A new error is then created with the
same args as the current one.
Args:
error: The new error to create.
*args: The standard args taken by the constructor of Exception for the new
exception that is created. If None, the args from the exception
currently being handled will be used.
Returns:
The generated error exception.
"""
(_, current_exception, _) = sys.exc_info()
# Log original exception details and traceback to the log file if we are
# currently handling an exception.
if current_exception:
file_logger = log.file_only_logger
file_logger.error('Handling the source of a tool exception, '
'original details follow.')
file_logger.exception(current_exception)
if args:
return error(*args)
elif current_exception:
return error(*current_exception.args)
return error('An unknown error has occurred')
# TODO(b/32328530): Remove ToolException when the last ref is gone
class ToolException(core_exceptions.Error):
"""ToolException is for Run methods to throw for non-code-bug errors.
Attributes:
command_name: The dotted group and command name for the command that threw
this exception. This value is set by calliope.
"""
@staticmethod
def FromCurrent(*args):
return NewErrorFromCurrentException(ToolException, *args)
class ExitCodeNoError(core_exceptions.Error):
"""A special exception for exit codes without error messages.
If this exception is raised, it's identical in behavior to returning from
the command code, except the overall exit code will be different.
"""
class FailedSubCommand(core_exceptions.Error):
"""Exception capturing a subcommand which did sys.exit(code)."""
def __init__(self, cmd, code):
super(FailedSubCommand, self).__init__(
'Failed command: [{0}] with exit code [{1}]'.format(
' '.join(cmd), code),
exit_code=code)
def RaiseErrorInsteadOf(error, *error_types):
"""A decorator that re-raises as an error.
If any of the error_types are raised in the decorated function, this decorator
will re-raise as an error.
Args:
error: Exception, The new exception to raise.
*error_types: [Exception], A list of exception types that this decorator
will watch for.
Returns:
The decorated function.
"""
def Wrap(func):
"""Wrapper function for the decorator."""
@wraps(func)
def TryFunc(*args, **kwargs):
try:
return func(*args, **kwargs)
except error_types:
(_, _, exc_traceback) = sys.exc_info()
# The 3 element form takes (type, instance, traceback). If the first
# element is an instance, it is used as the type and instance and the
# second element must be None. This preserves the original traceback.
# pylint:disable=nonstandard-exception, ToolException is an Exception.
raise NewErrorFromCurrentException(error), None, exc_traceback
return TryFunc
return Wrap
# TODO(b/32328530): Remove RaiseToolExceptionInsteadOf when the last ref is gone
def RaiseToolExceptionInsteadOf(*error_types):
"""A decorator that re-raises as ToolException."""
return RaiseErrorInsteadOf(ToolException, *error_types)
def _TruncateToLineWidth(string, align, width, fill=''):
"""Truncate string to line width, right aligning at align.
Examples (assuming a screen width of 10):
>>> _TruncateToLineWidth('foo', 0)
'foo'
>>> # Align to the beginning. Should truncate the end.
... _TruncateToLineWidth('0123456789abcdef', 0)
'0123456789'
>>> _TruncateToLineWidth('0123456789abcdef', 0, fill='...')
'0123456...'
>>> # Align to the end. Should truncate the beginning.
... _TruncateToLineWidth('0123456789abcdef', 16)
'6789abcdef'
>>> _TruncateToLineWidth('0123456789abcdef', 16, fill='...')
'...9abcdef'
>>> # Align to the middle (note: the index is toward the end of the string,
... # because this function right-aligns to the given index).
... # Should truncate the begnining and end.
... _TruncateToLineWidth('0123456789abcdef', 12)
'23456789ab'
>>> _TruncateToLineWidth('0123456789abcdef', 12, fill='...')
'...5678...'
Args:
string: string to truncate
align: index to right-align to
width: maximum length for the resulting string
fill: if given, indicate truncation with this string. Must be shorter than
terminal width / 2.
Returns:
str, the truncated string
Raises:
ValueError, if provided fill is too long for the terminal.
"""
if len(fill) >= width / 2:
# Either the caller provided a fill that's way too long, or the user has a
# terminal that's way too narrow. In either case, we aren't going to be able
# to make this look nice, but we don't want to throw an error because that
# will mask the original error.
log.warn('Screen not wide enough to display correct error message.')
return string
if len(string) <= width:
return string
if align > width:
string = fill + string[align-width+len(fill):]
if len(string) <= width:
return string
string = string[:width-len(fill)] + fill
return string
_MARKER = '^ invalid character'
# pylint: disable=g-doc-bad-indent
def _FormatNonAsciiMarkerString(args):
u"""Format a string that will mark the first non-ASCII character it contains.
Example:
>>> args = ['command.py', '--foo=\xce\x94']
>>> _FormatNonAsciiMarkerString(args) == (
... 'command.py --foo=\u0394\n'
... ' ^ invalid character'
... )
True
Args:
args: The arg list for the command executed
Returns:
unicode, a properly formatted string with two lines, the second of which
indicates the non-ASCII character in the first.
Raises:
ValueError: if the given string is all ASCII characters
"""
# nonascii will be True if at least one arg contained a non-ASCII character
nonascii = False
# pos is the position of the first non-ASCII character in ' '.join(args)
pos = 0
for arg in args:
try:
# idx is the index of the first non-ASCII character in arg
for idx, char in enumerate(arg):
char.decode('ascii')
except UnicodeError:
# idx will remain set, indicating the first non-ASCII character
pos += idx
nonascii = True
break
# this arg was all ASCII; add 1 for the ' ' between args
pos += len(arg) + 1
if not nonascii:
raise ValueError('The command line is composed entirely of ASCII '
'characters.')
# Make a string that, when printed in parallel, will point to the non-ASCII
# character
marker_string = ' ' * pos + _MARKER
# Make sure that this will still print out nicely on an odd-sized screen
align = len(marker_string)
args_string = u' '.join([console_attr.EncodeForOutput(arg) for arg in args])
width, _ = console_attr_os.GetTermSize()
fill = '...'
if width < len(_MARKER) + len(fill):
# It's hopeless to try to wrap this and make it look nice. Preserve it in
# full for logs and so on.
return '\n'.join((args_string, marker_string))
# If len(args_string) < width < len(marker_string) (ex:)
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
# width = len('----------------')
#
# then the truncation can give a result like the following:
#
# args_string = 'command BAD'
# marker_string = ' ^ invalid character'
#
# (This occurs when args_string is short enough to not be truncated, but
# marker_string is long enough to be truncated.)
#
# ljust args_string to make it as long as marker_string before passing to
# _TruncateToLineWidth, which will yield compatible truncations. rstrip at the
# end to get rid of the new trailing spaces.
formatted_args_string = _TruncateToLineWidth(args_string.ljust(align), align,
width, fill=fill).rstrip()
formatted_marker_string = _TruncateToLineWidth(marker_string, align, width)
return u'\n'.join((formatted_args_string, formatted_marker_string))
class InvalidCharacterInArgException(ToolException):
"""InvalidCharacterInArgException is for non-ASCII CLI arguments."""
def __init__(self, args, invalid_arg):
self.invalid_arg = invalid_arg
cmd = os.path.basename(args[0])
if cmd.endswith('.py'):
cmd = cmd[:-3]
args = [cmd] + args[1:]
super(InvalidCharacterInArgException, self).__init__(
u'Failed to read command line argument [{0}] because it does '
u'not appear to be valid 7-bit ASCII.\n\n'
u'{1}'.format(
console_attr.EncodeForOutput(self.invalid_arg),
_FormatNonAsciiMarkerString(args)))
# TODO(user): Eventually use api_exceptions.HttpException exclusively.
class HttpException(api_exceptions.HttpException):
"""HttpException is raised whenever the Http response status code != 200.
See api_lib.util.exceptions.HttpException for full documentation.
"""
def __init__(self, error, error_format='{message}'):
super(HttpException, self).__init__(error, error_format)
class InvalidArgumentException(ToolException):
"""InvalidArgumentException is for malformed arguments."""
def __init__(self, parameter_name, message):
super(InvalidArgumentException, self).__init__(
u'Invalid value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class ConflictingArgumentsException(ToolException):
"""ConflictingArgumentsException arguments that are mutually exclusive."""
def __init__(self, *parameter_names):
super(ConflictingArgumentsException, self).__init__(
u'arguments not allowed simultaneously: ' + ', '.join(parameter_names))
self.parameter_names = parameter_names
class UnknownArgumentException(ToolException):
"""UnknownArgumentException is for arguments with unexpected values."""
def __init__(self, parameter_name, message):
super(UnknownArgumentException, self).__init__(
u'Unknown value for [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class RequiredArgumentException(ToolException):
"""An exception for when a usually optional argument is required in this case.
"""
def __init__(self, parameter_name, message):
super(RequiredArgumentException, self).__init__(
'Missing required argument [{0}]: {1}'.format(parameter_name, message))
self.parameter_name = parameter_name
class MinimumArgumentException(ToolException):
"""An exception for when one of several arguments is required."""
def __init__(self, parameter_names, message):
super(MinimumArgumentException, self).__init__(
'One of [{0}] must be supplied: {1}'.format(
', '.join(['{0}'.format(p) for p in parameter_names]),
message)
)
class BadFileException(ToolException):
"""BadFileException is for problems reading or writing a file."""
# pylint: disable=g-import-not-at-top, Delay the import of this because
# importing store is relatively expensive.
def _GetTokenRefreshError(exc):
from googlecloudsdk.core.credentials import store
return store.TokenRefreshError(exc)
# In general, lower level libraries should be catching exceptions and re-raising
# exceptions that extend core.Error so nice error messages come out. There are
# some error classes that want to be handled as recoverable errors, but cannot
# import the core_exceptions module (and therefore the Error class) for various
# reasons (e.g. circular dependencies). To work around this, we keep a list of
# known "friendly" error types, which we handle in the same way as core.Error.
# Additionally, we provide an alternate exception class to convert the errors
# to which may add additional information. We use strings here so that we don't
# have to import all these libraries all the time, just to be able to handle the
# errors when they come up. Only add errors here if there is no other way to
# handle them.
_KNOWN_ERRORS = {
'apitools.base.py.exceptions.HttpError': HttpException,
'googlecloudsdk.core.util.files.Error': lambda x: None,
'httplib.ResponseNotReady': core_exceptions.NetworkIssueError,
'oauth2client.client.AccessTokenRefreshError': _GetTokenRefreshError,
'ssl.SSLError': core_exceptions.NetworkIssueError,
}
def _GetExceptionName(exc):
"""Returns the exception name used as index into _KNOWN_ERRORS."""
if isinstance(exc, type):
name = exc.__module__ + '.' + exc.__name__
else:
name = exc.__class__.__module__ + '.' + exc.__class__.__name__
return name
def ConvertKnownError(exc):
"""Convert the given exception into an alternate type if it is known.
Args:
exc: Exception, the exception to convert.
Returns:
None if this is not a known type, otherwise a new exception that should be
logged.
"""
convert_to_known_err = _KNOWN_ERRORS.get(_GetExceptionName(exc))
if not convert_to_known_err:
# This is not a known error type
return None
# If there is no known exception just return the original exception.
return convert_to_known_err(exc) or exc
| mit | 7,098,045,688,062,629,000 | 34.702439 | 80 | 0.699617 | false |
gnip/support | Data Collector/Rules API/Python/AddRule.py | 3 | 1489 | #!/usr/bin/env python
import urllib2
import base64
import json
import xml
import sys
def post():
# Ensure that your stream format matches the rule format you intend to use (e.g. '.xml' or '.json')
# See below to edit the rule format used when adding and deleting rules (xml or json)
# Expected Enterprise Data Collector URL formats:
# JSON: https://<host>.gnip.com/data_collectors/<data_collector_id>/rules.json
# XML: https://<host>.gnip.com/data_collectors/<data_collector_id>/rules.xml
url = 'ENTER_RULES_API_URL_HERE'
UN = 'ENTER_USERNAME_HERE'
PWD = 'ENTER_PASSWORD_HERE'
rule = 'testRule'
tag = 'testTag'
# Edit below to use the rule format that matches the Rules API URL you entered above
# Use this line for XML formatted rules
values = '<rules><rule tag="' + tag + '"><value>' + rule + '</value></rule></rules>'
# Use this line for JSON formatted rules
# values = '{"rules": [{"value":"' + rule + '","tag":"' + tag + '"}]}'
base64string = base64.encodestring('%s:%s' % (UN, PWD)).replace('\n', '')
req = urllib2.Request(url=url, data=values)
# Use this line for JSON formatted rules
# req.add_header('Content-type', 'application/json')
# Use this line for XML formatted rules
req.add_header('Content-type', 'application/xml')
req.add_header("Authorization", "Basic %s" % base64string)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print e.read()
the_page = response.read()
if __name__ == "__main__":
post()
| mit | -900,697,633,826,165,200 | 26.574074 | 99 | 0.675621 | false |
shsingh/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_screen.py | 9 | 18673 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: zabbix_screen
short_description: Create/update/delete Zabbix screens
description:
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
- "Cove (@cove)"
- "Tony Minfei Ding (!UNKNOWN)"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.4"
options:
screens:
description:
- List of screens to be created/updated/deleted (see example).
type: list
elements: dict
required: true
suboptions:
screen_name:
description:
- Screen name will be used.
- If a screen has already been added, the screen name won't be updated.
type: str
required: true
host_group:
description:
- Host group will be used for searching hosts.
- Required if I(state=present).
type: str
state:
description:
- I(present) - Create a screen if it doesn't exist. If the screen already exists, the screen will be updated as needed.
- I(absent) - If a screen exists, the screen will be deleted.
type: str
default: present
choices:
- absent
- present
graph_names:
description:
- Graph names will be added to a screen. Case insensitive.
- Required if I(state=present).
type: list
elements: str
graph_width:
description:
- Graph width will be set in graph settings.
type: int
graph_height:
description:
- Graph height will be set in graph settings.
type: int
graphs_in_row:
description:
- Limit columns of a screen and make multiple rows.
type: int
default: 3
sort:
description:
- Sort hosts alphabetically.
- If there are numbers in hostnames, leading zero should be used.
type: bool
default: no
extends_documentation_fragment:
- zabbix
notes:
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = r'''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items 5 in a row
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
graphs_in_row: 5
# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
- screen_name: ExampleScreen2
host_group: Example group2
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
screens:
- screen_name: ExampleScreen
host_group: Example group
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
when: inventory_hostname==groups['group_name'][0]
'''
import atexit
import traceback
try:
from zabbix_api import ZabbixAPI
from zabbix_api import ZabbixAPIException
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class Screen(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# get group id by group name
def get_host_group_id(self, group_name):
if group_name == "":
self._module.fail_json(msg="group_name is required")
hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
if len(hostGroup_list) < 1:
self._module.fail_json(msg="Host group not found: %s" % group_name)
else:
hostGroup_id = hostGroup_list[0]['groupid']
return hostGroup_id
# get monitored host_id by host_group_id
def get_host_ids_by_group_id(self, group_id, sort):
host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
if len(host_list) < 1:
self._module.fail_json(msg="No host in the group.")
else:
if sort:
host_list = sorted(host_list, key=lambda name: name['name'])
host_ids = []
for i in host_list:
host_id = i['hostid']
host_ids.append(host_id)
return host_ids
# get screen
def get_screen_id(self, screen_name):
if screen_name == "":
self._module.fail_json(msg="screen_name is required")
try:
screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
if len(screen_id_list) >= 1:
screen_id = screen_id_list[0]['screenid']
return screen_id
return None
except Exception as e:
self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
# create screen
def create_screen(self, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
return screen['screenids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
# update screen
def update_screen(self, screen_id, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
except Exception as e:
self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
# delete screen
def delete_screen(self, screen_id, screen_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.delete([screen_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
# get graph ids
def get_graph_ids(self, hosts, graph_name_list):
graph_id_lists = []
vsize = 1
for host in hosts:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
size = len(graph_id_list)
if size > 0:
graph_id_lists.extend(graph_id_list)
if vsize < size:
vsize = size
return graph_id_lists, vsize
# getGraphs
def get_graphs_by_host_id(self, graph_name_list, host_id):
graph_ids = []
for graph_name in graph_name_list:
graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
graph_id_list = []
if len(graphs_list) > 0:
for graph in graphs_list:
graph_id = graph['graphid']
graph_id_list.append(graph_id)
if len(graph_id_list) > 0:
graph_ids.extend(graph_id_list)
return graph_ids
# get screen items
def get_screen_items(self, screen_id):
screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
return screen_item_list
# delete screen items
def delete_screen_items(self, screen_id, screen_item_id_list):
try:
if len(screen_item_id_list) == 0:
return True
screen_item_list = self.get_screen_items(screen_id)
if len(screen_item_list) > 0:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screenitem.delete(screen_item_id_list)
return True
return False
except ZabbixAPIException:
pass
# get screen's hsize and vsize
def get_hsize_vsize(self, hosts, v_size, graphs_in_row):
h_size = len(hosts)
# when there is only one host, put all graphs in a row
if h_size == 1:
if v_size <= graphs_in_row:
h_size = v_size
else:
h_size = graphs_in_row
v_size = (v_size - 1) // h_size + 1
# when len(hosts) is more then graphs_in_row
elif len(hosts) > graphs_in_row:
h_size = graphs_in_row
v_size = (len(hosts) // graphs_in_row + 1) * v_size
return h_size, v_size
# create screen_items
def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size, graphs_in_row):
if len(hosts) < 4:
if width is None or width < 0:
width = 500
else:
if width is None or width < 0:
width = 200
if height is None or height < 0:
height = 100
try:
# when there're only one host, only one row is not good.
if len(hosts) == 1:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
for i, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i % h_size, 'y': i // h_size, 'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
else:
for i, host in enumerate(hosts):
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
for j, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i % graphs_in_row, 'y': len(graph_id_list) * (i // graphs_in_row) + j,
'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
except Already_Exists:
pass
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
timeout=dict(type='int', default=10),
screens=dict(
type='list',
elements='dict',
required=True,
options=dict(
screen_name=dict(type='str', required=True),
host_group=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
graph_names=dict(type='list', elements='str'),
graph_width=dict(type='int', default=None),
graph_height=dict(type='int', default=None),
graphs_in_row=dict(type='int', default=3),
sort=dict(default=False, type='bool'),
),
required_if=[
['state', 'present', ['host_group']]
]
)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
timeout = module.params['timeout']
screens = module.params['screens']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
screen = Screen(module, zbx)
created_screens = []
changed_screens = []
deleted_screens = []
for zabbix_screen in screens:
screen_name = zabbix_screen['screen_name']
screen_id = screen.get_screen_id(screen_name)
state = zabbix_screen['state']
sort = zabbix_screen['sort']
if state == "absent":
if screen_id:
screen_item_list = screen.get_screen_items(screen_id)
screen_item_id_list = []
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
screen_item_id_list.append(screen_item_id)
screen.delete_screen_items(screen_id, screen_item_id_list)
screen.delete_screen(screen_id, screen_name)
deleted_screens.append(screen_name)
else:
host_group = zabbix_screen['host_group']
graph_names = zabbix_screen['graph_names']
graphs_in_row = zabbix_screen['graphs_in_row']
graph_width = zabbix_screen['graph_width']
graph_height = zabbix_screen['graph_height']
host_group_id = screen.get_host_group_id(host_group)
hosts = screen.get_host_ids_by_group_id(host_group_id, sort)
screen_item_id_list = []
resource_id_list = []
graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
h_size, v_size = screen.get_hsize_vsize(hosts, v_size, graphs_in_row)
if not screen_id:
# create screen
screen_id = screen.create_screen(screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size, graphs_in_row)
created_screens.append(screen_name)
else:
screen_item_list = screen.get_screen_items(screen_id)
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
resource_id = screen_item['resourceid']
screen_item_id_list.append(screen_item_id)
resource_id_list.append(resource_id)
                # if the screen items have changed, update the screen
if graph_ids != resource_id_list:
deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
if deleted:
screen.update_screen(screen_id, screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size, graphs_in_row)
changed_screens.append(screen_name)
if created_screens and changed_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens),
",".join(changed_screens)))
elif created_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
elif changed_screens:
module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
elif deleted_screens:
module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,670,101,714,846,610,000 | 38.645435 | 140 | 0.542655 | false |
huahang/typhoon-blade | src/blade/load_build_files.py | 3 | 9983 | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
# Feng Chen <[email protected]>
# Yi Wang <[email protected]>
# Chong Peng <[email protected]>
# Date: October 20, 2011
"""
This module loads BUILD files, executes them against the registered build
rules, and collects the resulting targets into the target database.
"""
import os
import traceback
import build_rules
import console
from blade_util import relative_path
# Importing these modules registers their build functions into build_rules.
# TODO(chen3feng): Load build modules dynamically to enable extension.
import cc_targets
import cu_targets
import gen_rule_target
import java_jar_target
import java_targets
import lex_yacc_target
import proto_library_target
import py_targets
import resource_library_target
import swig_library_target
import thrift_library
import fbthrift_library
class TargetAttributes(object):
"""Build target attributes
"""
def __init__(self, options):
self._options = options
@property
def bits(self):
return int(self._options.m)
@property
def arch(self):
if self._options.m == '32':
return 'i386'
else:
return 'x86_64'
def is_debug(self):
return self._options.profile == 'debug'
build_target = None
def _find_dir_depender(dir, blade):
"""_find_dir_depender to find which target depends on the dir.
"""
target_database = blade.get_target_database()
for key in target_database:
for dkey in target_database[key].expanded_deps:
if dkey[0] == dir:
return '//%s:%s' % (target_database[key].path,
target_database[key].name)
return None
def _report_not_exist(source_dir, path, blade):
""" Report dir or BUILD file does not exist
"""
depender = _find_dir_depender(source_dir, blade)
if depender:
console.error_exit('//%s not found, required by %s, exit...' % (path, depender))
else:
console.error_exit('//%s not found, exit...' % path)
def enable_if(cond, true_value, false_value=None):
"""A global function can be called in BUILD to filter srcs/deps by target"""
if cond:
ret = true_value
else:
ret = false_value
if ret is None:
ret = []
return ret
build_rules.register_function(enable_if)
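# Illustrative (hypothetical) BUILD-file usage of enable_if; the target and
# file names are made up, and build_target is the variable this module
# registers for BUILD files:
#
#   cc_library(
#       name = 'util',
#       srcs = ['util.cpp'] + enable_if(build_target.bits == 64, ['util_64.cpp']),
#   )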
IGNORE_IF_FAIL = 0
WARN_IF_FAIL = 1
ABORT_IF_FAIL = 2
def _load_build_file(source_dir, action_if_fail, processed_source_dirs, blade):
"""_load_build_file to load the BUILD and place the targets into database.
Invoked by _load_targets. Load and execute the BUILD
file, which is a Python script, in source_dir. Statements in BUILD
    depend on the global variable current_source_dir, and will register build
    targets/rules into the global variable target_database. If path/BUILD
    does NOT exist, take the action corresponding to action_if_fail. The
    parameter processed_source_dirs refers to a set defined in the
caller and used to avoid duplicated execution of BUILD files.
"""
    # Initialize build_target the first time; it is used by BUILD files
# loaded by execfile
global build_target
if build_target is None:
build_target = TargetAttributes(blade.get_options())
build_rules.register_variable('build_target', build_target)
source_dir = os.path.normpath(source_dir)
# TODO(yiwang): the character '#' is a magic value.
if source_dir in processed_source_dirs or source_dir == '#':
return
processed_source_dirs.add(source_dir)
if not os.path.exists(source_dir):
_report_not_exist(source_dir, source_dir, blade)
old_current_source_path = blade.get_current_source_path()
blade.set_current_source_path(source_dir)
build_file = os.path.join(source_dir, 'BUILD')
if os.path.exists(build_file):
try:
# The magic here is that a BUILD file is a Python script,
# which can be loaded and executed by execfile().
execfile(build_file, build_rules.get_all(), None)
except SystemExit:
console.error_exit('%s: fatal error, exit...' % build_file)
except:
console.error_exit('Parse error in %s, exit...\n%s' % (
build_file, traceback.format_exc()))
else:
if action_if_fail == ABORT_IF_FAIL:
_report_not_exist(source_dir, build_file, blade)
blade.set_current_source_path(old_current_source_path)
def _find_depender(dkey, blade):
"""_find_depender to find which target depends on the target with dkey.
"""
target_database = blade.get_target_database()
for key in target_database:
if dkey in target_database[key].expanded_deps:
return '//%s:%s' % (target_database[key].path,
target_database[key].name)
return None
def load_targets(target_ids, working_dir, blade_root_dir, blade):
"""load_targets.
    Parse and load targets, including those specified on the command line
and their direct and indirect dependencies, by loading related BUILD
files. Returns a map which contains all these targets.
"""
target_database = blade.get_target_database()
# targets specified in command line
cited_targets = set()
# cited_targets and all its dependencies
related_targets = {}
# source dirs mentioned in command line
source_dirs = []
# to prevent duplicated loading of BUILD files
processed_source_dirs = set()
direct_targets = []
all_command_targets = []
# Parse command line target_ids. For those in the form of <path>:<target>,
# record (<path>,<target>) in cited_targets; for the rest (with <path>
    # but without <target>), record <path> into source_dirs.
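    # For illustration (made-up target ids):
    #   'base:string_util'  -> the single target ('base', 'string_util')
    #   'base'              -> every target defined in base/BUILD
    #   'base/...'          -> every target under base/, recursively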
for target_id in target_ids:
if target_id.find(':') == -1:
source_dir, target_name = target_id, '*'
else:
source_dir, target_name = target_id.rsplit(':', 1)
source_dir = relative_path(os.path.join(working_dir, source_dir),
blade_root_dir)
if target_name != '*' and target_name != '':
cited_targets.add((source_dir, target_name))
elif source_dir.endswith('...'):
source_dir = source_dir[:-3]
if not source_dir:
source_dir = './'
source_dirs.append((source_dir, WARN_IF_FAIL))
for root, dirs, files in os.walk(source_dir):
# Skip over subdirs starting with '.', e.g., .svn.
# Note the dirs[:] = slice assignment; we are replacing the
# elements in dirs (and not the list referred to by dirs) so
# that os.walk() will not process deleted directories.
dirs[:] = [d for d in dirs if not d.startswith('.')]
for d in dirs:
source_dirs.append((os.path.join(root, d), IGNORE_IF_FAIL))
else:
source_dirs.append((source_dir, ABORT_IF_FAIL))
direct_targets = list(cited_targets)
# Load BUILD files in paths, and add all loaded targets into
    # cited_targets. Together with the above step, we can ensure that all
# targets mentioned in the command line are now in cited_targets.
for source_dir, action_if_fail in source_dirs:
_load_build_file(source_dir,
action_if_fail,
processed_source_dirs,
blade)
for key in target_database:
cited_targets.add(key)
all_command_targets = list(cited_targets)
    # Starting from the targets specified on the command line, breadth-first
# propagate to load BUILD files containing directly and indirectly
# dependent targets. All these targets form related_targets,
    # which is a subset of target_database created by loading BUILD files.
while cited_targets:
source_dir, target_name = cited_targets.pop()
target_id = (source_dir, target_name)
if target_id in related_targets:
continue
_load_build_file(source_dir,
ABORT_IF_FAIL,
processed_source_dirs,
blade)
if target_id not in target_database:
            console.error_exit('%s: target //%s:%s does not exist' % (
_find_depender(target_id, blade), source_dir, target_name))
related_targets[target_id] = target_database[target_id]
for key in related_targets[target_id].expanded_deps:
if key not in related_targets:
cited_targets.add(key)
    # Iterate over the related targets to collect svn root dirs
for path, name in related_targets:
root_dir = path.split('/')[0].strip()
if root_dir not in blade.svn_root_dirs and '#' not in root_dir:
blade.svn_root_dirs.append(root_dir)
return direct_targets, all_command_targets, related_targets
def find_blade_root_dir(working_dir):
"""find_blade_root_dir to find the dir holds the BLADE_ROOT file.
    The blade_root_dir is the closest ancestor directory of the current
    working directory that contains a file named BLADE_ROOT.
"""
blade_root_dir = working_dir
if blade_root_dir.endswith('/'):
blade_root_dir = blade_root_dir[:-1]
while blade_root_dir and blade_root_dir != '/':
if os.path.isfile(os.path.join(blade_root_dir, 'BLADE_ROOT')):
break
blade_root_dir = os.path.dirname(blade_root_dir)
if not blade_root_dir or blade_root_dir == '/':
console.error_exit(
"Can't find the file 'BLADE_ROOT' in this or any upper directory.\n"
"Blade need this file as a placeholder to locate the root source directory "
"(aka the directory where you #include start from).\n"
"You should create it manually at the first time.")
return blade_root_dir
| bsd-3-clause | -4,328,048,820,123,596,000 | 33.663194 | 92 | 0.626665 | false |
AdrianGaudebert/socorro | webapp-django/crashstats/crashstats/utils.py | 4 | 12843 | import csv
import codecs
import cStringIO
import datetime
import isodate
import functools
import json
import re
from collections import OrderedDict
from django import http
from django.conf import settings
from . import models
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def parse_isodate(ds):
"""
return a datetime object from a date string
"""
if isinstance(ds, unicode):
        # isodate's parse_datetime() struggles with unicode input,
        # so encode the string to ascii first.
ds = ds.encode('ascii')
return isodate.parse_datetime(ds)
def daterange(start_date, end_date, format='%Y-%m-%d'):
for n in range((end_date - start_date).days):
yield (start_date + datetime.timedelta(n)).strftime(format)
def json_view(f):
@functools.wraps(f)
def wrapper(request, *args, **kw):
request._json_view = True
response = f(request, *args, **kw)
if isinstance(response, http.HttpResponse):
return response
else:
indent = 0
request_data = (
request.method == 'GET' and request.GET or request.POST
)
if request_data.get('pretty') == 'print':
indent = 2
if isinstance(response, tuple) and isinstance(response[1], int):
response, status = response
else:
status = 200
if isinstance(response, tuple) and isinstance(response[1], dict):
response, headers = response
else:
headers = {}
http_response = http.HttpResponse(
_json_clean(json.dumps(
response,
cls=DateTimeEncoder,
indent=indent
)),
status=status,
content_type='application/json; charset=UTF-8'
)
for key, value in headers.items():
http_response[key] = value
return http_response
return wrapper
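# A hedged usage sketch (view name made up): a view wrapped with @json_view
# can return a plain structure, optionally as a (payload, status) or
# (payload, headers) tuple:
#
#   @json_view
#   def ping(request):
#       return {'ok': True}            # 200, application/json
#       # return {'error': 'nope'}, 400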
def _json_clean(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
    # the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashe\
# s-escaped
return value.replace("</", "<\\/")
def enhance_frame(frame, vcs_mappings):
"""
Add some additional info to a stack frame--signature
and source links from vcs_mappings.
"""
if 'function' in frame:
# Remove spaces before all stars, ampersands, and commas
function = re.sub(' (?=[\*&,])', '', frame['function'])
# Ensure a space after commas
function = re.sub(',(?! )', ', ', function)
frame['function'] = function
signature = function
elif 'file' in frame and 'line' in frame:
signature = '%s#%d' % (frame['file'], frame['line'])
elif 'module' in frame and 'module_offset' in frame:
signature = '%s@%s' % (frame['module'], frame['module_offset'])
else:
signature = '@%s' % frame['offset']
frame['signature'] = signature
frame['short_signature'] = re.sub('\(.*\)', '', signature)
if 'file' in frame:
vcsinfo = frame['file'].split(':')
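        # The 4-part form is 'vcstype:root:path/in/repo:revision', e.g.
        # (illustrative) 'hg:hg.mozilla.org/mozilla-central:js/src/jit/Ion.cpp:abcdef012345'.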
if len(vcsinfo) == 4:
vcstype, root, vcs_source_file, revision = vcsinfo
if '/' in root:
# The root is something like 'hg.mozilla.org/mozilla-central'
server, repo = root.split('/', 1)
else:
# E.g. 'gecko-generated-sources' or something without a '/'
repo = server = root
if vcs_source_file.count('/') > 1 and len(vcs_source_file.split('/')[0]) == 128:
# In this case, the 'vcs_source_file' will be something like
# '{SHA-512 hex}/ipc/ipdl/PCompositorBridgeChild.cpp'
# So drop the sha part for the sake of the 'file' because
# we don't want to display a 128 character hex code in the
# hyperlink text.
vcs_source_file_display = '/'.join(vcs_source_file.split('/')[1:])
else:
                # Leave it as is if it's not unwieldy in length.
vcs_source_file_display = vcs_source_file
if vcstype in vcs_mappings:
if server in vcs_mappings[vcstype]:
link = vcs_mappings[vcstype][server]
frame['file'] = vcs_source_file_display
frame['source_link'] = link % {
'repo': repo,
'file': vcs_source_file,
'revision': revision,
'line': frame['line']}
else:
path_parts = vcs_source_file.split('/')
frame['file'] = path_parts.pop()
def parse_dump(dump, vcs_mappings):
parsed_dump = {
'status': 'OK',
'modules': [],
'threads': [],
'crash_info': {
'crashing_thread': None,
},
'system_info': {}
}
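    # The dump is pipe-delimited stackwalker output; for illustration only
    # (all values made up), lines look like:
    #   OS|Windows NT|6.1.7601 Service Pack 1
    #   CPU|x86|GenuineIntel family 6 model 42 stepping 7|4
    #   Crash|EXCEPTION_ACCESS_VIOLATION_READ|0x8|0
    #   Module|xul.dll|34.0|xul.pdb|ABCDEF1234|0x63e80000|0x6a16ffff|1
    #   0|0|xul.dll|SomeFunction(int)|hg:hg.mozilla.org/mozilla-central:dom/Foo.cpp:rev|123|0x0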
for line in dump.split('\n'):
entry = line.split('|')
if entry[0] == 'OS':
parsed_dump['system_info']['os'] = entry[1]
parsed_dump['system_info']['os_ver'] = entry[2]
elif entry[0] == 'CPU':
parsed_dump['system_info']['cpu_arch'] = entry[1]
parsed_dump['system_info']['cpu_info'] = entry[2]
parsed_dump['system_info']['cpu_count'] = int(entry[3])
elif entry[0] == 'Crash':
parsed_dump['crash_info']['type'] = entry[1]
parsed_dump['crash_info']['crash_address'] = entry[2]
parsed_dump['crash_info']['crashing_thread'] = int(entry[3])
elif entry[0] == 'Module':
if entry[7] == '1':
parsed_dump['main_module'] = len(parsed_dump['modules'])
parsed_dump['modules'].append({
'filename': entry[1],
'version': entry[2],
'debug_file': entry[3],
'debug_id': entry[4],
'base_addr': entry[5],
'end_addr': entry[6]
})
elif entry[0].isdigit():
thread_num, frame_num, module_name, function, \
source_file, source_line, instruction = entry
thread_num = int(thread_num)
frame_num = int(frame_num)
frame = {
'frame': frame_num,
}
if module_name:
frame['module'] = module_name
if not function:
frame['module_offset'] = instruction
else:
frame['offset'] = instruction
if function:
frame['function'] = function
if not source_line:
frame['function_offset'] = instruction
if source_file:
frame['file'] = source_file
if source_line:
frame['line'] = int(source_line)
enhance_frame(frame, vcs_mappings)
if parsed_dump['crash_info']['crashing_thread'] is None:
parsed_dump['crash_info']['crashing_thread'] = thread_num
if thread_num >= len(parsed_dump['threads']):
                # `parsed_dump['threads']` is a list and we haven't stuffed
                # this many items into it yet, so up to and including
# `thread_num` we stuff in an initial empty one
for x in range(len(parsed_dump['threads']), thread_num + 1):
# This puts in the possible padding too if thread_num
# is higher than the next index
if x >= len(parsed_dump['threads']):
parsed_dump['threads'].append({
'thread': x,
'frames': []
})
parsed_dump['threads'][thread_num]['frames'].append(frame)
parsed_dump['thread_count'] = len(parsed_dump['threads'])
for thread in parsed_dump['threads']:
thread['frame_count'] = len(thread['frames'])
return parsed_dump
def enhance_json_dump(dump, vcs_mappings):
"""
Add some information to the stackwalker's json_dump output
for display. Mostly applying vcs_mappings to stack frames.
"""
for i, thread in enumerate(dump.get('threads', [])):
if 'thread' not in thread:
thread['thread'] = i
for frame in thread['frames']:
enhance_frame(frame, vcs_mappings)
return dump
def build_default_context(product=None, versions=None):
"""
    Build a template context dict from ``product`` and ``versions``.
    If the product or any version is not recognized, raise a 404 error.
"""
context = {}
api = models.ProductVersions()
active_versions = OrderedDict() # so that products are in order
# Turn the list of all product versions into a dict, one per product.
for pv in api.get(active=True)['hits']:
if pv['product'] not in active_versions:
active_versions[pv['product']] = []
active_versions[pv['product']].append(pv)
context['active_versions'] = active_versions
if versions is None:
versions = []
elif isinstance(versions, basestring):
versions = versions.split(';')
if product:
if product not in context['active_versions']:
raise http.Http404('Not a recognized product')
context['product'] = product
else:
context['product'] = settings.DEFAULT_PRODUCT
if versions:
assert isinstance(versions, list)
context['version'] = versions[0]
# Also, check that that's a valid version for this product
pv_versions = [
x['version'] for x in active_versions[context['product']]
]
for version in versions:
if version not in pv_versions:
raise http.Http404("Not a recognized version for that product")
return context
_crash_id_regex = re.compile(
r'^(%s)?([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
r'[0-9a-f]{4}-[0-9a-f]{6}[0-9]{6})$' % (settings.CRASH_ID_PREFIX,)
)
def find_crash_id(input_str):
"""Return the valid Crash ID part of a string"""
for match in _crash_id_regex.findall(input_str):
try:
datetime.datetime.strptime(match[1][-6:], '%y%m%d')
return match[1]
except ValueError:
pass # will return None
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([unicode(s).encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
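# A hedged usage sketch (file name made up):
#
#   with open('report.csv', 'wb') as f:
#       writer = UnicodeWriter(f)
#       writer.writerow([u'signature', u'count'])
#       writer.writerows([[u'shutdownhang', 123], [u'OOM | small', 45]])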
def add_CORS_header(f):
@functools.wraps(f)
def wrapper(request, *args, **kw):
response = f(request, *args, **kw)
response['Access-Control-Allow-Origin'] = '*'
return response
return wrapper
def ratelimit_rate(group, request):
"""return None if we don't want to set any rate limit.
Otherwise return a number according to
https://django-ratelimit.readthedocs.org/en/latest/rates.html#rates-chapter
"""
if group == 'crashstats.api.views.model_wrapper':
if request.user.is_active:
return settings.API_RATE_LIMIT_AUTHENTICATED
else:
return settings.API_RATE_LIMIT
elif group.startswith('crashstats.supersearch.views.search'):
# this applies to both the web view and ajax views
if request.user.is_active:
return settings.RATELIMIT_SUPERSEARCH_AUTHENTICATED
else:
return settings.RATELIMIT_SUPERSEARCH
raise NotImplementedError(group)
| mpl-2.0 | -3,472,060,438,278,238,000 | 34.576177 | 92 | 0.555166 | false |