# Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flexlay.gui.editor_map_component import EditorMapComponent
from flexlay.math import Point
from flexlay.tools import Tool
class Zoom2Tool(Tool):
def __init__(self):
super().__init__()
self.active = False
self.click_pos = Point(0, 0)
self.old_zoom = 0.0
def on_mouse_up(self, event):
self.active = False
def on_mouse_down(self, event):
self.active = True
self.click_pos = event.mouse_pos
gc = EditorMapComponent.current.get_gc_state()
self.old_zoom = gc.get_zoom()
def on_mouse_move(self, event):
if self.active:
gc = EditorMapComponent.current.get_gc_state()
zoom_pos = Point(gc.width / 2, gc.height / 2)
factor = (event.mouse_pos.y - self.click_pos.y) / 20.0
if factor > 0:
gc.set_zoom(self.old_zoom * pow(1.25, factor), zoom_pos)
elif factor < 0:
gc.set_zoom(self.old_zoom / pow(1.25, -factor), zoom_pos)
else:
gc.set_zoom(self.old_zoom, zoom_pos)
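# Note (added for clarity, not part of the original file): the zoom scales
# exponentially with vertical drag distance. Each 20 px of drag in one
# direction multiplies the zoom captured at mouse-down by 1.25, while drag
# in the opposite direction divides it, so returning the pointer to the
# click position restores the original zoom.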
# EOF #
| Karkus476/flexlay | flexlay/tools/zoom2_tool.py | Python | gpl-3.0 | 1,815 |
#!/usr/bin/env python
# Copyright (C) 2006-2010, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author: James Krycka
"""
This script uses py2exe to create inversion\dist\direfl.exe for Windows.
The resulting executable bundles the DiRefl application, the python runtime
environment, and other required python packages into a single file. Additional
resource files that are needed when DiRefl is run are placed in the dist
directory tree. On completion, the contents of the dist directory tree can be
used by the Inno Setup Compiler (via a separate script) to build a Windows
installer/uninstaller for deployment of the DiRefl application. For testing
purposes, direfl.exe can be run from the dist directory.
"""
import os
import sys
'''
print "*** Python path is:"
for i, p in enumerate(sys.path):
print "%5d %s" %(i, p)
'''
from distutils.core import setup
# Augment the setup interface with the py2exe command and make sure the py2exe
# option is passed to setup.
import py2exe
if len(sys.argv) == 1:
sys.argv.append('py2exe')
import matplotlib
# Retrieve the application version string.
from version import version
# A manifest is required to be included in a py2exe image (or accessible as a
# file in the image directory) when wxPython is included so that the Windows XP
# theme is used when rendering wx widgets. The manifest must be matched to the
# version of Python that is being used.
#
# Create a manifest for use with Python 2.5 on Windows XP or Vista. It is
# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
manifest_for_python25 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="1.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
manifest_for_python26 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32">
</assemblyIdentity>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="6595b64144ccf1df"
language="*">
</assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
# Select the appropriate manifest to use.
if sys.version_info >= (3, 0) or sys.version_info < (2, 5):
print "*** This script only works with Python 2.5, 2.6, or 2.7."
sys.exit()
elif sys.version_info >= (2, 6):
manifest = manifest_for_python26
elif sys.version_info >= (2, 5):
manifest = manifest_for_python25
# Create a list of all files to include along side the executable being built
# in the dist directory tree. Each element of the data_files list is a tuple
# consisting of a path (relative to dist\) and a list of files in that path.
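# For instance, an entry like ('examples', [os.path.join('examples', 'demo.dat')])
# would copy demo.dat into dist\examples\ (an illustrative file name; the real
# entries are appended below).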
data_files = []
# Add data files from the matplotlib\mpl-data folder and its subfolders.
# For matplotlib prior to version 0.99 see the examples at the end of the file.
data_files = matplotlib.get_py2exe_datafiles()
# Add resource files that need to reside in the same directory as the image.
data_files.append( ('.', [os.path.join('.', 'direfl.ico')]) )
data_files.append( ('.', [os.path.join('.', 'direfl_splash.png')]) )
data_files.append( ('.', [os.path.join('.', 'LICENSE.txt')]) )
data_files.append( ('.', [os.path.join('.', 'README.txt')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_1.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_2.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'demo_model_3.dat')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd1.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'qrd2.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_air_4.refl')]) )
data_files.append( ('examples', [os.path.join('examples', 'surround_d2o_4.refl')]) )
# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
# Python 2.6 or 2.7. This kit will be installed on the target system as part
# of the installation process for the frozen image. Note that the Python 2.5
# interpreter requires msvcr71.dll, which is included in the Python25 package;
# however, Python 2.6 and 2.7 require msvcr90.dll but do not bundle it with
# the Python26 or Python27 package. Thus, for Python 2.6 and later, the
# appropriate dll must be present on the target system at runtime.
if sys.version_info >= (2, 6):
pypath = os.path.dirname(sys.executable)
data_files.append( ('.', [os.path.join(pypath, 'vcredist_x86.exe')]) )
# Specify required packages to bundle in the executable image.
packages = ['matplotlib', 'numpy', 'scipy', 'pytz']
# Specify files to include in the executable image.
includes = []
# Specify files to exclude from the executable image.
# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
# - We do not use ssl services so they are omitted.
# - We can safely exclude the TkAgg matplotlib backend because our app uses
# "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
# - On the web it is widely recommended to exclude certain lib*.dll modules,
#   but this no longer seems necessary (though keeping them excluded does not hurt).
# - Python25 requires msvcr71.dll, however, Win XP includes this file.
# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
# - For some reason cygwin1.dll gets included by default, but it is not needed.
excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg']
dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgdk-win32-2.0-0.dll',
'tcl84.dll',
'tk84.dll',
'QtGui4.dll',
'QtCore4.dll',
'msvcr71.dll',
'msvcp90.dll',
'w9xpopen.exe',
'cygwin1.dll']
class Target():
"""This class stores metadata about the distribution in a dictionary."""
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
client = Target(
name = 'DiRefl',
description = 'Direct Inversion Reflectometry (DiRefl) application',
script = 'bin/direfl.py', # module to run on application start
dest_base = 'direfl', # file name part of the exe file to create
icon_resources = [(1, 'direfl.ico')], # also need to specify in data_files
bitmap_resources = [],
other_resources = [(24, 1, manifest % dict(prog='DiRefl'))] )
# Now we do the work to create a standalone distribution using py2exe.
#
# When the application is run in console mode, a console window will be created
# to receive any logging or error messages and the application will then create
# a separate GUI application window.
#
# When the application is run in windows mode, it will create a GUI application
# window and no console window will be provided. Output to stderr will be
# written to <app-image-name>.log.
setup(
#console=[client],
windows=[client],
options={'py2exe': {
'packages': packages,
'includes': includes,
'excludes': excludes,
'dll_excludes': dll_excludes,
'compressed': 1, # standard compression
'optimize': 0, # no byte-code optimization
'dist_dir': "dist",# where to put py2exe results
'xref': False, # display cross reference (as html doc)
'bundle_files': 1 # bundle python25.dll in library.zip
}
},
#zipfile=None, # None means bundle library.zip in exe
data_files=data_files # list of files to copy to dist directory
)
#==============================================================================
# This section is for reference only when using older versions of matplotlib.
# The location of mpl-data files has changed across releases of matplotlib.
# Furthermore, matplotlib.get_py2exe_datafiles() had problems prior to version
# 0.99 (see link below for details), so alternative ways had to be used.
# The various techniques shown below for obtaining matplotlib auxiliary files
# (and previously used by this project) were adapted from the examples and
# discussion on http://www.py2exe.org/index.cgi/MatPlotLib.
#
# The following technique worked for matplotlib 0.91.2.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98.5.
# Note that glob '*.*' will not find files that have no file extension.
'''
import glob
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
mpl_lst = ('mpl-data', glob.glob(os.path.join(matplotlibdatadir, '*.*')))
data_files.append(mpl_lst)
mpl_lst = ('mpl-data', [os.path.join(matplotlibdatadir, 'matplotlibrc')])
data_files.append(mpl_lst) # pickup file missed by glob
mpl_lst = (r'mpl-data\fonts\afm',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\afm\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\pdfcorefonts',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\pdfcorefonts\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\fonts\ttf',
glob.glob(os.path.join(matplotlibdatadir, r'fonts\ttf\*.*')))
data_files.append(mpl_lst)
mpl_lst = (r'mpl-data\images',
glob.glob(os.path.join(matplotlibdatadir, r'images\*.*')))
data_files.append(mpl_lst)
'''
# The following technique worked for matplotlib 0.98 and 0.99.
'''
from distutils.filelist import findall
data_files = []
matplotlibdatadir = matplotlib.get_data_path()
matplotlibdata = findall(matplotlibdatadir)
for f in matplotlibdata:
dirname = os.path.join('mpl-data', f[len(matplotlibdatadir)+1:])
data_files.append((os.path.split(dirname)[0], [f]))
'''
| reflectometry/direfl | setup_py2exe.py | Python | mit | 12,830 |
#!/usr/bin/python
# This script parses the contents of the given 'Sources' files and prints the
# file list from which an rsync command line can be built to synchronize a mirror
import re
import sys
# Regex to parse
regex = re.compile(r"^(?P<param>[a-zA-Z0-9_-]+):\s?(?P<value>.*)$")
files_regex = re.compile(r"(?P<md5>[a-f0-9]{32}) [0-9]+ (?P<filename>.*)")
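# Illustrative input (not part of this script): a typical Sources stanza is
#   Package: foo
#   Directory: pool/main/f/foo
#   Files:
#    0123456789abcdef0123456789abcdef 1024 foo_1.0.orig.tar.gz
# `regex` captures the "Param: value" header lines, while `files_regex` pulls
# (md5, filename) pairs out of the accumulated "files" value.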
for pkgfile in sys.argv[1:]:
if pkgfile.endswith(".gz"):
import gzip
file = gzip.open(pkgfile)
elif pkgfile.endswith(".bz2"):
import bz2
file = bz2.BZ2File(pkgfile)
else:
file = open(pkgfile)
pkg={}
cur_param=""
for line in file:
if line == "\n":
#print("----------------------------------------------------")
basedir=pkg['directory']
files=files_regex.findall(pkg['files'])
            for md5, fname in files:
                print basedir + "/" + fname
pkg={}
continue
m = regex.match(line)
if m:
cur_param = m.group("param").lower()
pkg[cur_param] = m.group("value")
elif line.startswith(" "):
# We got a multiliner continuation
pkg[cur_param] += line.lstrip()
else:
print "IMMPOSSIBIRUUUU!!!!"
sys.exit(999)
| brain461/mirror-sync | util/parseSources.py | Python | gpl-2.0 | 1,263 |
"""Implementations of various matchers for pdbuddy
Matchers provide a convenience layer for filtering trace points. If a matcher
matches a call trace, that call will be processed. Otherwise it will be
ignored.
"""
from __future__ import absolute_import
from pdbuddy.matchers.base import BaseMatcher
from pdbuddy.matchers.call import CallMatcher
from pdbuddy.matchers.filename import FilenameMatcher
from pdbuddy.matchers.line_number import LineNumberMatcher
from pdbuddy.matchers.match_any import AnyMatcher
from pdbuddy.matchers.matcher_operators import AndMatcher, BinaryMatcher, OrMatcher
from pdbuddy.matchers.function import FunctionMatcher
__all__ = [
'AndMatcher',
'AnyMatcher',
'BaseMatcher',
'BinaryMatcher',
'CallMatcher',
'FilenameMatcher',
'FunctionMatcher',
'LineNumberMatcher',
'OrMatcher',
]
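# Illustrative usage (a sketch; the constructor signatures are assumptions,
# see the individual matcher modules for the real interfaces):
#
#   matcher = AndMatcher(FilenameMatcher('app.py'), FunctionMatcher('main'))
#
# would restrict tracing to calls of main() made in app.py.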
| emou/pdbuddy | pdbuddy/matchers/__init__.py | Python | mit | 847 |
from django.test import TestCase
from curious.query import Parser
import humanize
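# Note for readers: the query strings exercised below follow curious's
# 'Model(filter) Model.relationship(filters) ...' pattern; for example,
# 'Blog(1) Blog.entry_set' starts from Blog id=1 and follows entry_set.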
class TestParserCore(TestCase):
def test_parsing_model_and_filter(self):
p = Parser('A(1)')
self.assertEquals(p.object_query, {'model': 'A', 'method': None,
'filters': [{'method': 'filter', 'kwargs': {'id': '1'}}]})
self.assertEquals(p.steps, [])
def test_parsing_kv_filters_in_steps(self):
p = Parser('A(1) B.b(a=1, b="2", c=True, d=[1,2, 3])')
self.assertEquals(p.object_query, {'model': 'A', 'method': None,
'filters': [{'method': 'filter', 'kwargs': {'id': '1'}}]})
self.assertEquals(p.steps, [{
'model': 'B', 'method': 'b',
'filters': [{'method': 'filter', 'kwargs': {'a': 1, 'b': '2', 'c': True, 'd': [1, 2, 3]}}]
}])
def test_parsing_relationship_and_filters(self):
qs = 'Blog(1) Blog.entry_set Entry.authors(name__icontains="Smith")'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [
dict(model='Blog', method='entry_set', filters=[]),
dict(model='Entry', method='authors', filters=[
dict(method='filter', kwargs=dict(name__icontains='Smith')),
])
])
def test_parsing_multiple_filters(self):
qs = 'Entry(1) Entry.authors(name__icontains="Smith").exclude(name__icontains="Joe")'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Entry')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(model='Entry', method='authors', filters=[
dict(method='filter', kwargs=dict(name__icontains='Smith')),
dict(method='exclude', kwargs=dict(name__icontains='Joe')),
])])
def test_parsing_joins(self):
qs = 'Blog(1), Blog.entry_set'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(join=True, model='Blog', method='entry_set', filters=[])])
def test_parsing_or_queries(self):
qs = 'Blog(1), (Blog.entry_set_a) | (Blog.entry_set_b)'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(join=True, orquery=[
[dict(model='Blog', method='entry_set_a', filters=[])],
[dict(model='Blog', method='entry_set_b', filters=[])],
])])
def test_parsing_sub_queries(self):
qs = 'Blog(1) (Blog.entry_set)'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(having=None, join=False, subquery=[
dict(model='Blog', method='entry_set', filters=[]),
])])
def test_parsing_sub_queries_with_plus(self):
qs = 'Blog(1) +(Blog.entry_set)'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(having='+', join=False, subquery=[
dict(model='Blog', method='entry_set', filters=[]),
])])
def test_parsing_sub_queries_with_minus(self):
qs = 'Blog(1) -(Blog.entry_set)'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(having='-', join=False, subquery=[
dict(model='Blog', method='entry_set', filters=[]),
])])
def test_parsing_left_join_sub_queries(self):
qs = 'Blog(1) ?(Blog.entry_set)'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [dict(having='?', join=False, subquery=[
dict(model='Blog', method='entry_set', filters=[])
])])
def test_parsing_or_query_in_sub_query(self):
qs = 'Blog(1) ((Blog.entry_set_a)|(Blog.entry_set_b))'
q = Parser(qs)
self.assertEquals(q.object_query['model'], 'Blog')
self.assertEquals(q.object_query['method'], None)
self.assertEquals(q.object_query['filters'], [dict(method='filter', kwargs=dict(id='1'))])
self.assertEquals(q.steps, [
dict(having=None, join=False, subquery=[
dict(join=False, orquery=[
[dict(model='Blog', method='entry_set_a', filters=[])],
[dict(model='Blog', method='entry_set_b', filters=[])],
]),
])
])
class TestDateTimeParsing(TestCase):
def test_does_not_auto_convert_date_strings(self):
p = Parser('A(1) B.b(a="3 days ago")')
self.assertEquals(p.steps, [{'model': 'B', 'method': 'b',
'filters': [{'method': 'filter',
'kwargs': {'a': '3 days ago'}}]}])
def test_converts_date_strings_with_years(self):
p = Parser('A(1) B.b(a=t"3 years ago")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "3 years ago")
p = Parser('A(1) B.b(a=t"10 years from now")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "10 years from now")
def test_converts_date_strings_with_days(self):
p = Parser('A(1) B.b(a=t"3 days ago")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "3 days ago")
p = Parser('A(1) B.b(a=t"3 days 15 minutes from now")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "3 days from now")
def test_converts_date_strings_with_minutes(self):
p = Parser('A(1) B.b(a=t"2 minutes ago")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "2 minutes ago")
p = Parser('A(1) B.b(a=t"10 minutes 10 seconds from now")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "10 minutes from now")
def test_converts_date_strings_with_seconds(self):
p = Parser('A(1) B.b(a=t"10 seconds ago")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertEquals(t_natural, "10 seconds ago")
p = Parser('A(1) B.b(a=t"10 seconds from now")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertIn(t_natural, ["10 seconds from now", "9 seconds from now"])
def test_converts_date_strings_with_year_and_days_incorrectly(self):
p = Parser('A(1) B.b(a=t"1 year 3 days ago")')
t = p.steps[0]['filters'][0]['kwargs']['a']
t_natural = str(humanize.naturaltime(t))
self.assertIn("11 months", t_natural)
def test_converts_a_specific_date(self):
p = Parser('A(1) B.b(a=t"Aug 22nd 2014")')
t = p.steps[0]['filters'][0]['kwargs']['a']
self.assertEquals(t.year, 2014)
self.assertEquals(t.month, 8)
self.assertEquals(t.day, 22)
p = Parser('A(1) B.b(a=t"8/22/2014")')
t = p.steps[0]['filters'][0]['kwargs']['a']
self.assertEquals(t.year, 2014)
self.assertEquals(t.month, 8)
self.assertEquals(t.day, 22)
| ginkgobioworks/curious | tests/curious_tests/test_parser.py | Python | mit | 8,133 |
# So that we can construct any QObject from a string.
from PySide.QtGui import *
from qtlayoutbuilder.api.layouterror import LayoutError
from qtlayoutbuilder.lib.qtclassnameprompter import QtClassNamePrompter
class QObjectMaker(object):
"""
Able to construct (or find) the QObjects cited by each line of the
builder's input. Includes fairly rich error handling, including suggestions
of what you might have meant when it can't recognize the class you
asked for.
"""
def __init__(self, widget_and_layout_finder):
self._widget_and_layout_finder = widget_and_layout_finder
def make(self, name, type_word):
if type_word.startswith('?'):
return self._find_existing_object(name, type_word[1:])
else:
return self._instantiate_object(type_word)
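    # Usage sketch (hypothetical builder input, not taken from this module):
    # a line declaring "box QHBoxLayout" leads to make('box', 'QHBoxLayout'),
    # which instantiates a new layout, while "label ?QLabel" leads to
    # make('label', '?QLabel'), which looks up an already-instantiated QLabel
    # referenced by a variable or attribute named 'label'.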
# ----------------------------------------------------------------------------
# Private below
def _instantiate_object(self, type_word):
"""
Instantiates a QObject of the type specified by the name.
"""
# First see if the constructor can be found.
try:
constructor = globals()[type_word]
except KeyError:
raise LayoutError("""
            Python cannot find this word in the QtGui namespace: <%s>.
Did you mean one of these:
%s
""", (type_word, self._generate_name_suggestions(type_word)))
        # Deal with one special case (a pity, but adding stretch to a
        # QxBoxLayout is too common not to support).
if type_word == 'QSpacerItem':
instance = QSpacerItem(
0, 0, QSizePolicy.Expanding, QSizePolicy.Expanding)
return instance
# Now the general case.
# Have a go at constructing it, and then make sure it is a class derived
# from QLayout or QWidget.
try:
instance = constructor()
except Exception as e:
raise LayoutError("""
Cannot instantiate one of these: <%s>.
            It is supposed to be the name of a QLayout or QWidget
class, that can be used as a constructor.
The error coming back from Python is:
%s.
""", (type_word, str(e)))
# Make sure it is a QWidget or QLayout
if isinstance(instance, QWidget):
return instance
if isinstance(instance, QLayout):
return instance
raise LayoutError("""
This class name: <%s>, instantiates successfully,
but is neither a QLayout nor a QWidget.
""", type_word)
def _find_existing_object(self, name, type_word):
"""
Tries to find an already-instantiated QWidget or QLayout that is of
the class specified by the type_word provided, and which is
referenced by a
variable or attribute having the name specified by the name provided.
"""
found = self._widget_and_layout_finder.find(type_word, name)
        # Complain if nothing was found, or if more than one match was found.
if len(found) == 0:
raise LayoutError("""
Cannot find any objects of class <%s>,
that are referenced by a variable or attribute
called <%s>
""", (type_word, name))
if len(found) > 1:
raise LayoutError("""
Ambiguity problem. Found more than one object of
class: <%s>, referenced by a variable or attribute
called: <%s>
""", (type_word, name))
# All is well
return found[0]
def _generate_name_suggestions(self, duff_word):
list_of_names = QtClassNamePrompter.suggest_names_similar_to_this(
duff_word)
return '\n'.join(list_of_names)
| peterhoward42/qt-layout-gen | src/qtlayoutbuilder/lib/qobjectmaker.py | Python | mit | 3,867 |
import os.path
#
# Build and add path facets from filename
#
class enhance_path(object):
def process(self, parameters=None, data=None):
if parameters is None:
parameters = {}
if data is None:
data = {}
docid = parameters['id']
filename_extension = os.path.splitext(docid)[1][1:].lower()
if filename_extension:
data['filename_extension_s'] = filename_extension
if 'facet_path_strip_prefix' in parameters:
facet_path_strip_prefix = parameters['facet_path_strip_prefix']
else:
facet_path_strip_prefix = ['file://', 'http://', 'https://']
# if begins with unwanted path prefix strip it
if facet_path_strip_prefix:
for prefix in facet_path_strip_prefix:
if docid.startswith(prefix):
docid = docid.replace(prefix, '', 1)
break
        # replace backslash (i.e. windows filenames) with unix path separator
docid = docid.replace("\\", '/')
        # replace # (i.e. the URI fragment marker) with unix path separator
docid = docid.replace("#", '/')
        # collapse doubled slashes (//) into one
docid = docid.replace("//", '/')
# split paths
path = docid.split('/')
# it's only a domain
if (len(path) == 1) or (len(path) == 2 and docid.endswith('/')):
data['path0_s'] = path[0]
else:
# it's a path
            # a leading / on unix paths leaves an empty first element after the split, so delete it
if not path[0]:
del path[0]
i = 0
for subpath in path:
if i == len(path) - 1:
# last element, so basename/pure filename without path
                    if subpath:  # skip when the path ends with /, which leaves an empty last part after the split
data['path_basename_s'] = subpath
else:
# not last path element (=filename), so part of path, not the filename at the end
data['path' + str(i) + '_s'] = subpath
i += 1
return parameters, data
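# Worked example (added for illustration): for parameters['id'] set to
# "http://example.com/docs/a/b.pdf" the enrichment above fills data with
#   {'filename_extension_s': 'pdf', 'path0_s': 'example.com',
#    'path1_s': 'docs', 'path2_s': 'a', 'path_basename_s': 'b.pdf'}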
| opensemanticsearch/open-semantic-etl | src/opensemanticetl/enhance_path.py | Python | gpl-3.0 | 2,155 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learning_python.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| aminhp93/learning_python | src/manage.py | Python | mit | 813 |
# encoding: utf-8
"""
The :mod:`ast` module contains the classes comprising the Python abstract syntax tree.
All attributes ending with ``loc`` contain instances of :class:`.source.Range`
or None. All attributes ending with ``_locs`` contain lists of instances of
:class:`.source.Range` or [].
The attribute ``loc``, present in every class except those inheriting :class:`boolop`,
has a special meaning: it encompasses the entire AST node, so that it is possible
to cut the range contained inside ``loc`` of a parsetree fragment and paste it
somewhere else without altering said parsetree fragment.
The AST format for all supported versions is generally normalized to be a superset
of the native :mod:`..ast` module of the latest supported Python version.
In particular this affects:
* :class:`With`: on 2.6-2.7 it uses the 3.0 format.
* :class:`TryExcept` and :class:`TryFinally`: on 2.6-2.7 they're replaced with
:class:`Try` from 3.0.
* :class:`arguments`: on 2.6-3.1 it uses the 3.2 format, with dedicated
:class:`arg` in ``vararg`` and ``kwarg`` slots.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Location mixins
class commonloc(object):
"""
A mixin common for all nodes.
:cvar _locs: (tuple of strings)
names of all attributes with location values
:ivar loc: range encompassing all locations defined for this node
or its children
"""
_locs = ("loc",)
def _reprfields(self):
return self._fields + self._locs
def __repr__(self):
def value(name):
try:
loc = self.__dict__[name]
if isinstance(loc, list):
return "[%s]" % (", ".join(map(repr, loc)))
else:
return repr(loc)
except:
return "(!!!MISSING!!!)"
fields = ", ".join(map(lambda name: "%s=%s" % (name, value(name)),
self._reprfields()))
return "%s(%s)" % (self.__class__.__name__, fields)
@property
def lineno(self):
return self.loc.line()
class keywordloc(commonloc):
"""
A mixin common for all keyword statements, e.g. ``pass`` and ``yield expr``.
:ivar keyword_loc: location of the keyword, e.g. ``yield``.
"""
_locs = commonloc._locs + ("keyword_loc",)
class beginendloc(commonloc):
"""
A mixin common for nodes with a opening and closing delimiters, e.g. tuples and lists.
:ivar begin_loc: location of the opening delimiter, e.g. ``(``.
:ivar end_loc: location of the closing delimiter, e.g. ``)``.
"""
_locs = commonloc._locs + ("begin_loc", "end_loc")
# AST nodes
class AST(object):
"""
An ancestor of all nodes.
:cvar _fields: (tuple of strings)
names of all attributes with semantic values
"""
_fields = ()
def __init__(self, **fields):
for field in fields:
setattr(self, field, fields[field])
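    # Illustrative (not part of the original source): nodes are constructed
    # by passing fields as keyword arguments, e.g. Num(n=1) or
    # Name(id="x", loc=some_range); __repr__ then prints both the semantic
    # fields and whichever location fields have been set.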
class alias(AST, commonloc):
"""
An import alias, e.g. ``x as y``.
:ivar name: (string) value to import
:ivar asname: (string) name to add to the environment
:ivar name_loc: location of name
:ivar as_loc: location of ``as``
:ivar asname_loc: location of asname
"""
_fields = ("name", "asname")
_locs = commonloc._locs + ("name_loc", "as_loc", "asname_loc")
class arg(AST, commonloc):
"""
A formal argument, e.g. in ``def f(x)`` or ``def f(x: T)``.
:ivar arg: (string) argument name
:ivar annotation: (:class:`AST`) type annotation, if any; **emitted since 3.0**
:ivar arg_loc: location of argument name
:ivar colon_loc: location of ``:``, if any; **emitted since 3.0**
"""
_fields = ("arg", "annotation")
_locs = commonloc._locs + ("arg_loc", "colon_loc")
class arguments(AST, beginendloc):
"""
Function definition arguments, e.g. in ``def f(x, y=1, *z, **t)``.
:ivar args: (list of :class:`arg`) regular formal arguments
:ivar defaults: (list of :class:`AST`) values of default arguments
:ivar vararg: (:class:`arg`) splat formal argument (if any), e.g. in ``*x``
:ivar kwonlyargs: (list of :class:`arg`) keyword-only (post-\*) formal arguments;
**emitted since 3.0**
:ivar kw_defaults: (list of :class:`AST`) values of default keyword-only arguments;
**emitted since 3.0**
:ivar kwarg: (:class:`arg`) keyword splat formal argument (if any), e.g. in ``**x``
:ivar star_loc: location of ``*``, if any
:ivar dstar_loc: location of ``**``, if any
:ivar equals_locs: locations of ``=``
:ivar kw_equals_locs: locations of ``=`` of default keyword-only arguments;
**emitted since 3.0**
"""
_fields = ("args", "vararg", "kwonlyargs", "kwarg", "defaults", "kw_defaults")
_locs = beginendloc._locs + ("star_loc", "dstar_loc", "equals_locs", "kw_equals_locs")
class boolop(AST, commonloc):
"""
Base class for binary boolean operators.
This class is unlike others in that it does not have the ``loc`` field.
It serves only as an indicator of operation and corresponds to no source
itself; locations are recorded in :class:`BoolOp`.
"""
_locs = ()
class And(boolop):
"""The ``and`` operator."""
class Or(boolop):
"""The ``or`` operator."""
class cmpop(AST, commonloc):
"""Base class for comparison operators."""
class Eq(cmpop):
"""The ``==`` operator."""
class Gt(cmpop):
"""The ``>`` operator."""
class GtE(cmpop):
"""The ``>=`` operator."""
class In(cmpop):
"""The ``in`` operator."""
class Is(cmpop):
"""The ``is`` operator."""
class IsNot(cmpop):
"""The ``is not`` operator."""
class Lt(cmpop):
"""The ``<`` operator."""
class LtE(cmpop):
"""The ``<=`` operator."""
class NotEq(cmpop):
"""The ``!=`` (or deprecated ``<>``) operator."""
class NotIn(cmpop):
"""The ``not in`` operator."""
class comprehension(AST, commonloc):
"""
A single ``for`` list comprehension clause.
:ivar target: (assignable :class:`AST`) the variable(s) bound in comprehension body
:ivar iter: (:class:`AST`) the expression being iterated
:ivar ifs: (list of :class:`AST`) the ``if`` clauses
:ivar for_loc: location of the ``for`` keyword
:ivar in_loc: location of the ``in`` keyword
:ivar if_locs: locations of ``if`` keywords
"""
_fields = ("target", "iter", "ifs")
_locs = commonloc._locs + ("for_loc", "in_loc", "if_locs")
class excepthandler(AST, commonloc):
"""Base class for the exception handler."""
class ExceptHandler(excepthandler):
"""
An exception handler, e.g. ``except x as y:· z``.
:ivar type: (:class:`AST`) type of handled exception, if any
:ivar name: (assignable :class:`AST` **until 3.0**, string **since 3.0**)
variable bound to exception, if any
:ivar body: (list of :class:`AST`) code to execute when exception is caught
:ivar except_loc: location of ``except``
:ivar as_loc: location of ``as``, if any
:ivar name_loc: location of variable name
:ivar colon_loc: location of ``:``
"""
_fields = ("type", "name", "body")
_locs = excepthandler._locs + ("except_loc", "as_loc", "name_loc", "colon_loc")
class expr(AST, commonloc):
"""Base class for expression nodes."""
class Attribute(expr):
"""
An attribute access, e.g. ``x.y``.
:ivar value: (:class:`AST`) left-hand side
:ivar attr: (string) attribute name
"""
_fields = ("value", "attr", "ctx")
_locs = expr._locs + ("dot_loc", "attr_loc")
class BinOp(expr):
"""
A binary operation, e.g. ``x + y``.
:ivar left: (:class:`AST`) left-hand side
:ivar op: (:class:`operator`) operator
:ivar right: (:class:`AST`) right-hand side
"""
_fields = ("left", "op", "right")
class BoolOp(expr):
"""
A boolean operation, e.g. ``x and y``.
:ivar op: (:class:`boolop`) operator
:ivar values: (list of :class:`AST`) operands
:ivar op_locs: locations of operators
"""
_fields = ("op", "values")
_locs = expr._locs + ("op_locs",)
class Call(expr, beginendloc):
"""
A function call, e.g. ``f(x, y=1, *z, **t)``.
:ivar func: (:class:`AST`) function to call
:ivar args: (list of :class:`AST`) regular arguments
:ivar keywords: (list of :class:`keyword`) keyword arguments
:ivar starargs: (:class:`AST`) splat argument (if any), e.g. in ``*x``
:ivar kwargs: (:class:`AST`) keyword splat argument (if any), e.g. in ``**x``
:ivar star_loc: location of ``*``, if any
:ivar dstar_loc: location of ``**``, if any
"""
_fields = ("func", "args", "keywords", "starargs", "kwargs")
_locs = beginendloc._locs + ("star_loc", "dstar_loc")
class Compare(expr):
"""
A comparison operation, e.g. ``x < y`` or ``x < y > z``.
:ivar left: (:class:`AST`) left-hand
:ivar ops: (list of :class:`cmpop`) compare operators
:ivar comparators: (list of :class:`AST`) compare values
"""
_fields = ("left", "ops", "comparators")
class Dict(expr, beginendloc):
"""
A dictionary, e.g. ``{x: y}``.
:ivar keys: (list of :class:`AST`) keys
:ivar values: (list of :class:`AST`) values
:ivar colon_locs: locations of ``:``
"""
_fields = ("keys", "values")
_locs = beginendloc._locs + ("colon_locs",)
class DictComp(expr, beginendloc):
"""
A list comprehension, e.g. ``{x: y for x,y in z}``.
**Emitted since 2.7.**
:ivar key: (:class:`AST`) key part of comprehension body
:ivar value: (:class:`AST`) value part of comprehension body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
:ivar colon_loc: location of ``:``
"""
_fields = ("key", "value", "generators")
_locs = beginendloc._locs + ("colon_loc",)
class Ellipsis(expr):
"""The ellipsis, e.g. in ``x[...]``."""
class GeneratorExp(expr, beginendloc):
"""
A generator expression, e.g. ``(x for x in y)``.
:ivar elt: (:class:`AST`) expression body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
"""
_fields = ("elt", "generators")
class IfExp(expr):
"""
A conditional expression, e.g. ``x if y else z``.
:ivar test: (:class:`AST`) condition
:ivar body: (:class:`AST`) value if true
:ivar orelse: (:class:`AST`) value if false
:ivar if_loc: location of ``if``
:ivar else_loc: location of ``else``
"""
_fields = ("test", "body", "orelse")
_locs = expr._locs + ("if_loc", "else_loc")
class Lambda(expr):
"""
A lambda expression, e.g. ``lambda x: x*x``.
:ivar args: (:class:`arguments`) arguments
:ivar body: (:class:`AST`) body
:ivar lambda_loc: location of ``lambda``
:ivar colon_loc: location of ``:``
"""
_fields = ("args", "body")
_locs = expr._locs + ("lambda_loc", "colon_loc")
class List(expr, beginendloc):
"""
A list, e.g. ``[x, y]``.
:ivar elts: (list of :class:`AST`) elements
"""
_fields = ("elts", "ctx")
class ListComp(expr, beginendloc):
"""
A list comprehension, e.g. ``[x for x in y]``.
:ivar elt: (:class:`AST`) comprehension body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
"""
_fields = ("elt", "generators")
class Name(expr):
"""
An identifier, e.g. ``x``.
:ivar id: (string) name
"""
_fields = ("id", "ctx")
class NameConstant(expr):
"""
A named constant, e.g. ``None``.
:ivar value: Python value, one of ``None``, ``True`` or ``False``
"""
_fields = ("value",)
class Num(expr):
"""
An integer, floating point or complex number, e.g. ``1``, ``1.0`` or ``1.0j``.
:ivar n: (int, float or complex) value
"""
_fields = ("n",)
class Repr(expr, beginendloc):
"""
A repr operation, e.g. ``\`x\```
**Emitted until 3.0.**
:ivar value: (:class:`AST`) value
"""
_fields = ("value",)
class Set(expr, beginendloc):
"""
A set, e.g. ``{x, y}``.
**Emitted since 2.7.**
:ivar elts: (list of :class:`AST`) elements
"""
_fields = ("elts",)
class SetComp(expr, beginendloc):
"""
A set comprehension, e.g. ``{x for x in y}``.
**Emitted since 2.7.**
:ivar elt: (:class:`AST`) comprehension body
:ivar generators: (list of :class:`comprehension`) ``for`` clauses
"""
_fields = ("elt", "generators")
class Str(expr, beginendloc):
"""
A string, e.g. ``"x"``.
:ivar s: (string) value
"""
_fields = ("s",)
class Starred(expr):
"""
A starred expression, e.g. ``*x`` in ``*x, y = z``.
:ivar value: (:class:`AST`) expression
:ivar star_loc: location of ``*``
"""
_fields = ("value", "ctx")
_locs = expr._locs + ("star_loc",)
class Subscript(expr, beginendloc):
"""
A subscript operation, e.g. ``x[1]``.
:ivar value: (:class:`AST`) object being sliced
:ivar slice: (:class:`slice`) slice
"""
_fields = ("value", "slice", "ctx")
class Tuple(expr, beginendloc):
"""
A tuple, e.g. ``(x,)`` or ``x,y``.
:ivar elts: (list of nodes) elements
"""
_fields = ("elts", "ctx")
class UnaryOp(expr):
"""
An unary operation, e.g. ``+x``.
:ivar op: (:class:`unaryop`) operator
:ivar operand: (:class:`AST`) operand
"""
_fields = ("op", "operand")
class Yield(expr):
"""
A yield expression, e.g. ``yield x``.
:ivar value: (:class:`AST`) yielded value
:ivar yield_loc: location of ``yield``
"""
_fields = ("value",)
_locs = expr._locs + ("yield_loc",)
class YieldFrom(expr):
"""
A yield from expression, e.g. ``yield from x``.
:ivar value: (:class:`AST`) yielded value
:ivar yield_loc: location of ``yield``
:ivar from_loc: location of ``from``
"""
_fields = ("value",)
_locs = expr._locs + ("yield_loc", "from_loc")
# expr_context
# AugLoad
# AugStore
# Del
# Load
# Param
# Store
class keyword(AST, commonloc):
"""
A keyword actual argument, e.g. in ``f(x=1)``.
:ivar arg: (string) name
:ivar value: (:class:`AST`) value
:ivar equals_loc: location of ``=``
"""
_fields = ("arg", "value")
_locs = commonloc._locs + ("arg_loc", "equals_loc")
class mod(AST, commonloc):
"""Base class for modules (groups of statements)."""
_fields = ("body",)
class Expression(mod):
"""A group of statements parsed as if for :func:`eval`."""
class Interactive(mod):
"""A group of statements parsed as if it was REPL input."""
class Module(mod):
"""A group of statements parsed as if it was a file."""
class operator(AST, commonloc):
"""Base class for numeric binary operators."""
class Add(operator):
"""The ``+`` operator."""
class BitAnd(operator):
"""The ``&`` operator."""
class BitOr(operator):
"""The ``|`` operator."""
class BitXor(operator):
"""The ``^`` operator."""
class Div(operator):
    """The ``/`` operator."""
class FloorDiv(operator):
    """The ``//`` operator."""
class LShift(operator):
"""The ``<<`` operator."""
class MatMult(operator):
"""The ``@`` operator."""
class Mod(operator):
"""The ``%`` operator."""
class Mult(operator):
"""The ``*`` operator."""
class Pow(operator):
"""The ``**`` operator."""
class RShift(operator):
"""The ``>>`` operator."""
class Sub(operator):
"""The ``-`` operator."""
class slice(AST, commonloc):
"""Base class for slice operations."""
class ExtSlice(slice):
"""
The multiple slice, e.g. in ``x[0:1, 2:3]``.
Note that multiple slices with only integer indexes
will appear as instances of :class:`Index`.
:ivar dims: (:class:`slice`) sub-slices
"""
_fields = ("dims",)
class Index(slice):
"""
The index, e.g. in ``x[1]`` or ``x[1, 2]``.
:ivar value: (:class:`AST`) index
"""
_fields = ("value",)
class Slice(slice):
"""
The slice, e.g. in ``x[0:1]`` or ``x[0:1:2]``.
:ivar lower: (:class:`AST`) lower bound, if any
:ivar upper: (:class:`AST`) upper bound, if any
:ivar step: (:class:`AST`) iteration step, if any
    :ivar bound_colon_loc: location of the first colon
    :ivar step_colon_loc: location of the second colon, if any
"""
_fields = ("lower", "upper", "step")
_locs = slice._locs + ("bound_colon_loc", "step_colon_loc")
class stmt(AST, commonloc):
"""Base class for statement nodes."""
class Assert(stmt, keywordloc):
"""
The ``assert x, msg`` statement.
:ivar test: (:class:`AST`) condition
:ivar msg: (:class:`AST`) message, if any
"""
_fields = ("test", "msg")
class Assign(stmt):
"""
The ``=`` statement, e.g. in ``x = 1`` or ``x = y = 1``.
:ivar targets: (list of assignable :class:`AST`) left-hand sides
:ivar value: (:class:`AST`) right-hand side
:ivar op_locs: location of equality signs corresponding to ``targets``
"""
_fields = ("targets", "value")
_locs = stmt._locs + ("op_locs",)
class AugAssign(stmt):
"""
The operator-assignment statement, e.g. ``+=``.
:ivar target: (assignable :class:`AST`) left-hand side
:ivar op: (:class:`operator`) operator
:ivar value: (:class:`AST`) right-hand side
"""
_fields = ("target", "op", "value")
class Break(stmt, keywordloc):
"""The ``break`` statement."""
class ClassDef(stmt, keywordloc):
"""
The ``class x(z, y):· t`` (2.6) or
``class x(y, z=1, *t, **u):· v`` (3.0) statement.
:ivar name: (string) name
:ivar bases: (list of :class:`AST`) base classes
:ivar keywords: (list of :class:`keyword`) keyword arguments; **emitted since 3.0**
:ivar starargs: (:class:`AST`) splat argument (if any), e.g. in ``*x``; **emitted since 3.0**
:ivar kwargs: (:class:`AST`) keyword splat argument (if any), e.g. in ``**x``; **emitted since 3.0**
:ivar body: (list of :class:`AST`) body
:ivar decorator_list: (list of :class:`AST`) decorators
:ivar keyword_loc: location of ``class``
:ivar name_loc: location of name
:ivar lparen_loc: location of ``(``, if any
:ivar star_loc: location of ``*``, if any; **emitted since 3.0**
:ivar dstar_loc: location of ``**``, if any; **emitted since 3.0**
:ivar rparen_loc: location of ``)``, if any
:ivar colon_loc: location of ``:``
:ivar at_locs: locations of decorator ``@``
"""
_fields = ("name", "bases", "keywords", "starargs", "kwargs", "body", "decorator_list")
_locs = keywordloc._locs + ("name_loc", "lparen_loc", "star_loc", "dstar_loc", "rparen_loc",
"colon_loc", "at_locs")
class Continue(stmt, keywordloc):
"""The ``continue`` statement."""
class Delete(stmt, keywordloc):
"""
The ``del x, y`` statement.
:ivar targets: (list of :class:`Name`)
"""
_fields = ("targets",)
class Exec(stmt, keywordloc):
"""
The ``exec code in locals, globals`` statement.
**Emitted until 3.0.**
:ivar body: (:class:`AST`) code
:ivar locals: (:class:`AST`) locals
:ivar globals: (:class:`AST`) globals
:ivar keyword_loc: location of ``exec``
:ivar in_loc: location of ``in``
"""
_fields = ("body", "locals", "globals")
_locs = keywordloc._locs + ("in_loc",)
class Expr(stmt):
"""
An expression in statement context. The value of expression is discarded.
:ivar value: (:class:`expr`) value
"""
_fields = ("value",)
class For(stmt, keywordloc):
"""
The ``for x in y:· z·else:· t`` statement.
:ivar target: (assignable :class:`AST`) loop variable
:ivar iter: (:class:`AST`) loop collection
:ivar body: (list of :class:`AST`) code for every iteration
:ivar orelse: (list of :class:`AST`) code if empty
:ivar keyword_loc: location of ``for``
:ivar in_loc: location of ``in``
:ivar for_colon_loc: location of colon after ``for``
:ivar else_loc: location of ``else``, if any
:ivar else_colon_loc: location of colon after ``else``, if any
"""
_fields = ("target", "iter", "body", "orelse")
_locs = keywordloc._locs + ("in_loc", "for_colon_loc", "else_loc", "else_colon_loc")
class FunctionDef(stmt, keywordloc):
"""
The ``def f(x):· y`` (2.6) or ``def f(x) -> t:· y`` (3.0) statement.
:ivar name: (string) name
:ivar args: (:class:`arguments`) formal arguments
:ivar returns: (:class:`AST`) return type annotation; **emitted since 3.0**
:ivar body: (list of :class:`AST`) body
:ivar decorator_list: (list of :class:`AST`) decorators
:ivar keyword_loc: location of ``def``
:ivar name_loc: location of name
:ivar arrow_loc: location of ``->``, if any; **emitted since 3.0**
:ivar colon_loc: location of ``:``, if any
:ivar at_locs: locations of decorator ``@``
"""
_fields = ("name", "args", "returns", "body", "decorator_list")
_locs = keywordloc._locs + ("name_loc", "arrow_loc", "colon_loc", "at_locs")
class Global(stmt, keywordloc):
"""
The ``global x, y`` statement.
:ivar names: (list of string) names
:ivar name_locs: locations of names
"""
_fields = ("names",)
_locs = keywordloc._locs + ("name_locs",)
class If(stmt, keywordloc):
"""
    The ``if x:· y·else:· z`` or ``if x:· y·elif z:· t`` statement.
:ivar test: (:class:`AST`) condition
:ivar body: (list of :class:`AST`) code if true
:ivar orelse: (list of :class:`AST`) code if false
:ivar if_colon_loc: location of colon after ``if`` or ``elif``
:ivar else_loc: location of ``else``, if any
:ivar else_colon_loc: location of colon after ``else``, if any
"""
_fields = ("test", "body", "orelse")
_locs = keywordloc._locs + ("if_colon_loc", "else_loc", "else_colon_loc")
class Import(stmt, keywordloc):
"""
The ``import x, y`` statement.
:ivar names: (list of :class:`alias`) names
"""
_fields = ("names",)
class ImportFrom(stmt, keywordloc):
"""
The ``from ...x import y, z`` or ``from x import (y, z)`` or
``from x import *`` statement.
:ivar names: (list of :class:`alias`) names
:ivar module: (string) module name, if any
:ivar level: (integer) amount of dots before module name
:ivar keyword_loc: location of ``from``
:ivar dots_loc: location of dots, if any
:ivar module_loc: location of module name, if any
:ivar import_loc: location of ``import``
:ivar lparen_loc: location of ``(``, if any
:ivar rparen_loc: location of ``)``, if any
"""
_fields = ("names", "module", "level")
_locs = keywordloc._locs + ("dots_loc", "module_loc", "import_loc", "lparen_loc", "rparen_loc")
class Nonlocal(stmt, keywordloc):
"""
The ``nonlocal x, y`` statement.
**Emitted since 3.0.**
:ivar names: (list of string) names
:ivar name_locs: locations of names
"""
_fields = ("names",)
_locs = keywordloc._locs + ("name_locs",)
class Pass(stmt, keywordloc):
"""The ``pass`` statement."""
class Print(stmt, keywordloc):
"""
The ``print >>x, y, z,`` statement.
**Emitted until 3.0 or until print_function future flag is activated.**
:ivar dest: (:class:`AST`) destination stream, if any
:ivar values: (list of :class:`AST`) values to print
:ivar nl: (boolean) whether to print newline after values
:ivar dest_loc: location of ``>>``
"""
_fields = ("dest", "values", "nl")
_locs = keywordloc._locs + ("dest_loc",)
class Raise(stmt, keywordloc):
"""
The ``raise exc, arg, traceback`` (2.6) or
or ``raise exc from cause`` (3.0) statement.
:ivar exc: (:class:`AST`) exception type or instance
:ivar cause: (:class:`AST`) cause of exception, if any; **emitted since 3.0**
:ivar inst: (:class:`AST`) exception instance or argument list, if any; **emitted until 3.0**
:ivar tback: (:class:`AST`) traceback, if any; **emitted until 3.0**
:ivar from_loc: location of ``from``, if any; **emitted since 3.0**
"""
_fields = ("exc", "cause", "inst", "tback")
_locs = keywordloc._locs + ("from_loc",)
class Return(stmt, keywordloc):
"""
The ``return x`` statement.
:ivar value: (:class:`AST`) return value, if any
"""
_fields = ("value",)
class Try(stmt, keywordloc):
"""
The ``try:· x·except y:· z·else:· t`` or
``try:· x·finally:· y`` statement.
:ivar body: (list of :class:`AST`) code to try
:ivar handlers: (list of :class:`ExceptHandler`) exception handlers
:ivar orelse: (list of :class:`AST`) code if no exception
:ivar finalbody: (list of :class:`AST`) code to finalize
:ivar keyword_loc: location of ``try``
:ivar try_colon_loc: location of ``:`` after ``try``
:ivar else_loc: location of ``else``
:ivar else_colon_loc: location of ``:`` after ``else``
:ivar finally_loc: location of ``finally``
:ivar finally_colon_loc: location of ``:`` after ``finally``
"""
_fields = ("body", "handlers", "orelse", "finalbody")
_locs = keywordloc._locs + ("try_colon_loc", "else_loc", "else_colon_loc",
"finally_loc", "finally_colon_loc",)
class While(stmt, keywordloc):
"""
The ``while x:· y·else:· z`` statement.
:ivar test: (:class:`AST`) condition
:ivar body: (list of :class:`AST`) code for every iteration
:ivar orelse: (list of :class:`AST`) code if empty
:ivar keyword_loc: location of ``while``
:ivar while_colon_loc: location of colon after ``while``
:ivar else_loc: location of ``else``, if any
:ivar else_colon_loc: location of colon after ``else``, if any
"""
_fields = ("test", "body", "orelse")
_locs = keywordloc._locs + ("while_colon_loc", "else_loc", "else_colon_loc")
class With(stmt, keywordloc):
"""
The ``with x as y:· z`` statement.
:ivar items: (list of :class:`withitem`) bindings
:ivar body: (:class:`AST`) body
:ivar keyword_loc: location of ``with``
:ivar colon_loc: location of ``:``
"""
_fields = ("items", "body")
_locs = keywordloc._locs + ("colon_loc",)
class unaryop(AST, commonloc):
"""Base class for unary numeric and boolean operators."""
class Invert(unaryop):
"""The ``~`` operator."""
class Not(unaryop):
"""The ``not`` operator."""
class UAdd(unaryop):
"""The unary ``+`` operator."""
class USub(unaryop):
"""The unary ``-`` operator."""
class withitem(AST, commonloc):
"""
The ``x as y`` clause in ``with x as y:``.
:ivar context_expr: (:class:`AST`) context
:ivar optional_vars: (assignable :class:`AST`) context binding, if any
:ivar as_loc: location of ``as``, if any
"""
_fields = ("context_expr", "optional_vars")
_locs = commonloc._locs + ("as_loc",)
| google/grumpy | third_party/pythonparser/ast.py | Python | apache-2.0 | 26,727 |
__author__ = 'paulo.rodenas'
from models.Cnpj import Cnpj
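# Background note (added for clarity; the check-digit math lives in
# models.Cnpj): a CNPJ is 14 digits, made of an 8-digit base number, a
# 4-digit branch suffix ("0001" for headquarters), and 2 verification
# digits computed from the first 12.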
for idx in xrange(0, 100):
from_idx = idx * 1000000
to_idx = (idx + 1) * 1000000
file_name = 'cnpjs_base_' + str(from_idx).zfill(7) + '.txt'
print 'Generating from', from_idx, 'to', to_idx, file_name
f = open(file_name, 'w')
for cnpj_idx in xrange(from_idx, to_idx):
cnpj_zero_padding = str(cnpj_idx).zfill(8)
cnpj_zero_padding += '0001'
last_two_digits = Cnpj.calculate_last_digits(cnpj_zero_padding)
cnpj_zero_padding += ''.join(last_two_digits)
f.write(cnpj_zero_padding + '\n')
f.flush()
    f.close()
| nfscan/fuzzy_cnpj_matcher | bulk/generate_cnpj_base_small.py | Python | mit | 644 |
#!/usr/bin/env python
# encoding: utf-8
class ApiException(Exception):
def __init__(self, api_response, previous=None):
self.__apiResponse = api_response
message = previous.message if previous and hasattr(previous, 'message') else 'Unknown error'
status = 0 # previous.status if previous else 0
if api_response:
if api_response.error():
message = api_response.error()
if api_response.response() and api_response.response().status_code:
status = api_response.response().status_code
super(ApiException, self).__init__(message)
def api_response(self):
return self.__apiResponse | ringcentral/ringcentral-python | ringcentral/http/api_exception.py | Python | mit | 695 |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the eos_lacp_interfaces module
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Lacp_interfacesArgs(object):
"""The arg spec for the eos_lacp_interfaces module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'config': {'elements': 'dict',
'options': {'name': {'type': 'str'},
'port_priority': {'type': 'int'},
'rate': {'choices': ['fast', 'normal'], 'type': 'str'}},
'type': 'list'},
'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged',
'type': 'str'}}
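# Illustrative playbook task that this spec validates (shown only as a
# sketch of standard Ansible usage):
#
#   - eos_lacp_interfaces:
#       config:
#         - name: Ethernet1
#           port_priority: 30
#           rate: fast
#       state: merged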
| thaim/ansible | lib/ansible/module_utils/network/eos/argspec/lacp_interfaces/lacp_interfaces.py | Python | mit | 1,399 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EventsPageViewInfo(Model):
"""The page view information.
:param name: The name of the page
:type name: str
:param url: The URL of the page
:type url: str
:param duration: The duration of the page view
:type duration: str
:param performance_bucket: The performance bucket of the page view
:type performance_bucket: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'duration': {'key': 'duration', 'type': 'str'},
'performance_bucket': {'key': 'performanceBucket', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EventsPageViewInfo, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.url = kwargs.get('url', None)
self.duration = kwargs.get('duration', None)
self.performance_bucket = kwargs.get('performance_bucket', None)
| Azure/azure-sdk-for-python | sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/models/events_page_view_info.py | Python | mit | 1,447 |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^codeparser/', include('codeparser.urls')),
]
| CWelshE/dataquest | django_coderunner/urls.py | Python | mit | 148 |
"""Application settings."""
from os import environ as _env
DEBUG = _env.get('DEBUG', '').lower() == 'true'
SECRET_KEY = _env.get('SECRET_KEY', 'You need to define a secret key')
# Miscellaneous
GOOGLE_ANALYTICS_ACCOUNT = _env.get('GOOGLE_ANALYTICS_ACCOUNT')
GOOGLE_ANALYTICS_DOMAIN = _env.get('GOOGLE_ANALYTICS_DOMAIN')
# Flask-Mail
MAIL_SERVER = _env.get('MAIL_SERVER', 'localhost')
MAIL_PORT = int(_env.get('MAIL_PORT', 25))
MAIL_USERNAME = _env.get('MAIL_USERNAME')
MAIL_PASSWORD = _env.get('MAIL_PASSWORD')
MAIL_USE_SSL = _env.get('MAIL_USE_SSL', '').lower() != 'false'
MAIL_USE_TLS = _env.get('MAIL_USE_TLS', '').lower() != 'false'
MAIL_DEBUG = _env.get('MAIL_DEBUG', '').lower() == 'true'
DEFAULT_MAIL_SENDER = _env.get('DEFAULT_MAIL_SENDER')
# Flask-Security
SECURITY_CHANGEABLE = True
SECURITY_DEFAULT_REMEMBER_ME = True
SECURITY_EMAIL_SENDER = DEFAULT_MAIL_SENDER
SECURITY_EMAIL_SUBJECT_REGISTER = 'Welcome to Secret Santa'
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_PASSWORD_SALT = _env.get('SECURITY_PASSWORD_SALT', 'Salt goes here')
SECURITY_POST_CHANGE_VIEW = 'users.profile'
SECURITY_RECOVERABLE = True
SECURITY_REGISTERABLE = False
SECURITY_TRACKABLE = True
# Flask-SQLAlchemy
SQLALCHEMY_DATABASE_URI = _env.get('DATABASE_URL')
| dirn/Secret-Santa | xmas/settings.py | Python | bsd-3-clause | 1,256 |
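A sketch of how a settings module like this is usually wired into Flask; the app object is an assumption about the surrounding project, though the 'xmas.settings' import path matches this file's location in the repository. Note the two boolean conventions above: flags read with == 'true' default to off, while the != 'false' flags (MAIL_USE_SSL, MAIL_USE_TLS) default to on unless explicitly set to 'false'.

from flask import Flask

app = Flask(__name__)
app.config.from_object('xmas.settings')  # copies the UPPERCASE names above
assert app.config['SECURITY_CHANGEABLE'] is True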
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
from pdb import set_trace
import logging
import unittest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.builder import (
TreeBuilder,
ParserRejectedMarkup,
)
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
Tag,
NavigableString,
)
import bs4.dammit
from bs4.dammit import (
EntitySubstitution,
UnicodeDammit,
EncodingDetector,
)
from bs4.testing import (
default_builder,
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError as e:
LXML_PRESENT = False
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
def test_short_unicode_input(self):
data = "<h1>éé</h1>"
soup = self.soup(data)
self.assertEqual("éé", soup.h1.string)
def test_embedded_null(self):
data = "<h1>foo\0bar</h1>"
soup = self.soup(data)
self.assertEqual("foo\0bar", soup.h1.string)
def test_exclude_encodings(self):
utf8_data = "Räksmörgås".encode("utf-8")
soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
self.assertEqual("windows-1252", soup.original_encoding)
def test_custom_builder_class(self):
# Verify that you can pass in a custom Builder class and
# it'll be instantiated with the appropriate keyword arguments.
class Mock(object):
def __init__(self, **kwargs):
self.called_with = kwargs
self.is_xml = True
self.store_line_numbers = False
self.cdata_list_attributes = []
self.preserve_whitespace_tags = []
self.string_containers = {}
def initialize_soup(self, soup):
pass
def feed(self, markup):
self.fed = markup
def reset(self):
pass
def ignore(self, ignore):
pass
set_up_substitutions = can_be_empty_element = ignore
def prepare_markup(self, *args, **kwargs):
yield "prepared markup", "original encoding", "declared encoding", "contains replacement characters"
kwargs = dict(
var="value",
# This is a deprecated BS3-era keyword argument, which
# will be stripped out.
convertEntities=True,
)
with warnings.catch_warnings(record=True):
soup = BeautifulSoup('', builder=Mock, **kwargs)
assert isinstance(soup.builder, Mock)
self.assertEqual(dict(var="value"), soup.builder.called_with)
self.assertEqual("prepared markup", soup.builder.fed)
# You can also instantiate the TreeBuilder yourself. In this
# case, that specific object is used and any keyword arguments
# to the BeautifulSoup constructor are ignored.
builder = Mock(**kwargs)
with warnings.catch_warnings(record=True) as w:
soup = BeautifulSoup(
'', builder=builder, ignored_value=True,
)
msg = str(w[0].message)
assert msg.startswith("Keyword arguments to the BeautifulSoup constructor will be ignored.")
self.assertEqual(builder, soup.builder)
self.assertEqual(kwargs, builder.called_with)
def test_parser_markup_rejection(self):
# If markup is completely rejected by the parser, an
# explanatory ParserRejectedMarkup exception is raised.
class Mock(TreeBuilder):
def feed(self, *args, **kwargs):
raise ParserRejectedMarkup("Nope.")
            def prepare_markup(self, markup, *args, **kwargs):
# We're going to try two different ways of preparing this markup,
# but feed() will reject both of them.
yield markup, None, None, False
yield markup, None, None, False
import re
self.assertRaisesRegex(
ParserRejectedMarkup,
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.",
BeautifulSoup, '', builder=Mock,
)
def test_cdata_list_attributes(self):
# Most attribute values are represented as scalars, but the
# HTML standard says that some attributes, like 'class' have
# space-separated lists as values.
markup = '<a id=" an id " class=" a class "></a>'
soup = self.soup(markup)
# Note that the spaces are stripped for 'class' but not for 'id'.
a = soup.a
self.assertEqual(" an id ", a['id'])
self.assertEqual(["a", "class"], a['class'])
        # TreeBuilder takes an argument called 'multi_valued_attributes' which lets
# you customize or disable this. As always, you can customize the TreeBuilder
# by passing in a keyword argument to the BeautifulSoup constructor.
soup = self.soup(markup, builder=default_builder, multi_valued_attributes=None)
self.assertEqual(" a class ", soup.a['class'])
# Here are two ways of saying that `id` is a multi-valued
# attribute in this context, but 'class' is not.
for switcheroo in ({'*': 'id'}, {'a': 'id'}):
with warnings.catch_warnings(record=True) as w:
# This will create a warning about not explicitly
# specifying a parser, but we'll ignore it.
soup = self.soup(markup, builder=None, multi_valued_attributes=switcheroo)
a = soup.a
self.assertEqual(["an", "id"], a['id'])
self.assertEqual(" a class ", a['class'])
def test_replacement_classes(self):
# Test the ability to pass in replacements for element classes
# which will be used when building the tree.
class TagPlus(Tag):
pass
class StringPlus(NavigableString):
pass
class CommentPlus(Comment):
pass
soup = self.soup(
"<a><b>foo</b>bar</a><!--whee-->",
element_classes = {
Tag: TagPlus,
NavigableString: StringPlus,
Comment: CommentPlus,
}
)
# The tree was built with TagPlus, StringPlus, and CommentPlus objects,
# rather than Tag, String, and Comment objects.
assert all(
isinstance(x, (TagPlus, StringPlus, CommentPlus))
for x in soup.recursiveChildGenerator()
)
def test_alternate_string_containers(self):
# Test the ability to customize the string containers for
# different types of tags.
class PString(NavigableString):
pass
class BString(NavigableString):
pass
soup = self.soup(
"<div>Hello.<p>Here is <b>some <i>bolded</i></b> text",
string_containers = {
'b': BString,
'p': PString,
}
)
# The string before the <p> tag is a regular NavigableString.
assert isinstance(soup.div.contents[0], NavigableString)
# The string inside the <p> tag, but not inside the <i> tag,
# is a PString.
assert isinstance(soup.p.contents[0], PString)
# Every string inside the <b> tag is a BString, even the one that
# was also inside an <i> tag.
for s in soup.b.strings:
assert isinstance(s, BString)
# Now that parsing was complete, the string_container_stack
# (where this information was kept) has been cleared out.
self.assertEqual([], soup.string_container_stack)
class TestWarnings(SoupTest):
    def _assert_no_parser_specified(self, s, is_there=True):
v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80])
self.assertTrue(v)
def test_warning_if_no_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>")
msg = str(w[0].message)
self._assert_no_parser_specified(msg)
def test_warning_if_parser_specified_too_vague(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", "html")
msg = str(w[0].message)
self._assert_no_parser_specified(msg)
def test_no_warning_if_explicit_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", "html.parser")
self.assertEqual([], w)
def test_parseOnlyThese_renamed_to_parse_only(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
msg = str(w[0].message)
self.assertTrue("parseOnlyThese" in msg)
self.assertTrue("parse_only" in msg)
self.assertEqual(b"<b></b>", soup.encode())
def test_fromEncoding_renamed_to_from_encoding(self):
with warnings.catch_warnings(record=True) as w:
utf8 = b"\xc3\xa9"
soup = self.soup(utf8, fromEncoding="utf8")
msg = str(w[0].message)
self.assertTrue("fromEncoding" in msg)
self.assertTrue("from_encoding" in msg)
self.assertEqual("utf8", soup.original_encoding)
def test_unrecognized_keyword_argument(self):
self.assertRaises(
TypeError, self.soup, "<a>", no_such_argument=True)
class TestFilenameOrURLWarnings(SoupTest):
def test_disk_file_warning(self):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
try:
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
msg = str(w[0].message)
self.assertTrue("looks like a filename" in msg)
finally:
filehandle.close()
# The file no longer exists, so Beautiful Soup will no longer issue the warning.
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
self.assertEqual(0, len(w))
def test_url_warning_with_bytes_url(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(b"http://www.crummybytes.com/")
# Be aware this isn't the only warning that can be raised during
            # execution.
self.assertTrue(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_unicode_url(self):
with warnings.catch_warnings(record=True) as warning_list:
# note - this url must differ from the bytes one otherwise
# python's warnings system swallows the second warning
soup = self.soup("http://www.crummyunicode.com/")
self.assertTrue(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_bytes_and_space(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(b"http://www.crummybytes.com/ is great")
self.assertFalse(any("looks like a URL" in str(w.message)
for w in warning_list))
def test_url_warning_with_unicode_and_space(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup("http://www.crummyuncode.com/ is great")
self.assertFalse(any("looks like a URL" in str(w.message)
for w in warning_list))
class TestSelectiveParsing(SoupTest):
def test_parse_with_soupstrainer(self):
markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
strainer = SoupStrainer("b")
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
"""Standalone tests of the EntitySubstitution class."""
def setUp(self):
self.sub = EntitySubstitution
def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
# are substituted, and no others.
s = "foo\u2200\N{SNOWMAN}\u00f5bar"
self.assertEqual(self.sub.substitute_html(s),
"foo∀\N{SNOWMAN}õbar")
def test_smart_quote_substitution(self):
# MS smart quotes are a common source of frustration, so we
# give them a special test.
quotes = b"\x91\x92foo\x93\x94"
dammit = UnicodeDammit(quotes)
self.assertEqual(self.sub.substitute_html(dammit.markup),
"‘’foo“”")
    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, False), s)
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
self.assertEqual(self.sub.substitute_xml("Welcome", True),
'"Welcome"')
self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
'"Bob\'s Bar"')
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, True),
"'Welcome to \"my bar\"'")
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
s = 'Welcome to "Bob\'s Bar"'
self.assertEqual(
self.sub.substitute_xml(s, True),
'"Welcome to "Bob\'s Bar""')
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
quoted = 'Welcome to "Bob\'s Bar"'
self.assertEqual(self.sub.substitute_xml(quoted), quoted)
def test_xml_quoting_handles_angle_brackets(self):
self.assertEqual(
self.sub.substitute_xml("foo<bar>"),
"foo<bar>")
def test_xml_quoting_handles_ampersands(self):
self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T")
def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
self.sub.substitute_xml("ÁT&T"),
"&Aacute;T&T")
def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
self.sub.substitute_xml_containing_entities("ÁT&T"),
"ÁT&T")
def test_quotes_not_html_substituted(self):
"""There's no need to do this except inside attribute values."""
text = 'Bob\'s "bar"'
self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
# Test Beautiful Soup's ability to decode and encode from various
# encodings.
def setUp(self):
super(TestEncodingConversion, self).setUp()
self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
self.utf8_data = self.unicode_data.encode("utf-8")
# Just so you know what it looks like.
self.assertEqual(
self.utf8_data,
b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
def test_ascii_in_unicode_out(self):
# ASCII input is converted to Unicode. The original_encoding
# attribute is set to 'utf-8', a superset of ASCII.
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
# Disable chardet, which will realize that the ASCII is ASCII.
bs4.dammit.chardet_dammit = noop
ascii = b"<foo>a</foo>"
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
self.assertTrue(isinstance(unicode_output, str))
self.assertEqual(unicode_output, self.document_for(ascii.decode()))
self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
# is not set.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
self.assertEqual(soup_from_unicode.foo.string, 'Sacr\xe9 bleu!')
self.assertEqual(soup_from_unicode.original_encoding, None)
def test_utf8_in_unicode_out(self):
# UTF-8 input is converted to Unicode. The original_encoding
# attribute is set.
soup_from_utf8 = self.soup(self.utf8_data)
self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
self.assertEqual(soup_from_utf8.foo.string, 'Sacr\xe9 bleu!')
def test_utf8_out(self):
# The internal data structures can be encoded as UTF-8.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
@skipIf(
PYTHON_3_PRE_3_2,
"Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
def test_attribute_name_containing_unicode_characters(self):
markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
"""Standalone tests of UnicodeDammit."""
def test_unicode_input(self):
markup = "I'm already Unicode! \N{SNOWMAN}"
dammit = UnicodeDammit(markup)
self.assertEqual(dammit.unicode_markup, markup)
def test_smart_quotes_to_unicode(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup)
self.assertEqual(
dammit.unicode_markup, "<foo>\u2018\u2019\u201c\u201d</foo>")
def test_smart_quotes_to_xml_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="xml")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_html_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="html")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_ascii(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
self.assertEqual(
dammit.unicode_markup, """<foo>''""</foo>""")
def test_detect_utf8(self):
utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
dammit = UnicodeDammit(utf8)
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
self.assertEqual(dammit.unicode_markup, 'Sacr\xe9 bleu! \N{SNOWMAN}')
def test_convert_hebrew(self):
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
self.assertEqual(dammit.unicode_markup, '\u05dd\u05d5\u05dc\u05e9')
def test_dont_see_smart_quotes_where_there_are_none(self):
utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
dammit = UnicodeDammit(utf_8)
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)
def test_ignore_inappropriate_codecs(self):
utf8_data = "Räksmörgås".encode("utf-8")
dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_ignore_invalid_codecs(self):
utf8_data = "Räksmörgås".encode("utf-8")
for bad_encoding in ['.utf8', '...', 'utF---16.!']:
dammit = UnicodeDammit(utf8_data, [bad_encoding])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_exclude_encodings(self):
# This is UTF-8.
utf8_data = "Räksmörgås".encode("utf-8")
# But if we exclude UTF-8 from consideration, the guess is
# Windows-1252.
dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
self.assertEqual(dammit.original_encoding.lower(), 'windows-1252')
# And if we exclude that, there is no valid guess at all.
dammit = UnicodeDammit(
utf8_data, exclude_encodings=["utf-8", "windows-1252"])
self.assertEqual(dammit.original_encoding, None)
def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
detected = EncodingDetector(
b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
encodings = list(detected.encodings)
assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings
def test_detect_html5_style_meta_tag(self):
for data in (
b'<html><meta charset="euc-jp" /></html>',
b"<html><meta charset='euc-jp' /></html>",
b"<html><meta charset=euc-jp /></html>",
b"<html><meta charset=euc-jp/></html>"):
dammit = UnicodeDammit(data, is_html=True)
self.assertEqual(
"euc-jp", dammit.original_encoding)
def test_last_ditch_entity_replacement(self):
# This is a UTF-8 document that contains bytestrings
# completely incompatible with UTF-8 (ie. encoded with some other
# encoding).
#
# Since there is no consistent encoding for the document,
# Unicode, Dammit will eventually encode the document as UTF-8
# and encode the incompatible characters as REPLACEMENT
# CHARACTER.
#
# If chardet is installed, it will detect that the document
# can be converted into ISO-8859-1 without errors. This happens
# to be the wrong encoding, but it is a consistent encoding, so the
# code we're testing here won't run.
#
# So we temporarily disable chardet if it's present.
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
bs4.dammit.chardet_dammit = noop
dammit = UnicodeDammit(doc)
self.assertEqual(True, dammit.contains_replacement_characters)
self.assertTrue("\ufffd" in dammit.unicode_markup)
soup = BeautifulSoup(doc, "html.parser")
self.assertTrue(soup.contains_replacement_characters)
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_byte_order_mark_removed(self):
# A document written in UTF-16LE will have its byte order marker stripped.
data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
dammit = UnicodeDammit(data)
self.assertEqual("<a>áé</a>", dammit.unicode_markup)
self.assertEqual("utf-16le", dammit.original_encoding)
def test_detwingle(self):
# Here's a UTF8 document.
utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")
# Here's a Windows-1252 document.
windows_1252 = (
"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
# Through some unholy alchemy, they've been stuck together.
doc = utf8 + windows_1252 + utf8
# The document can't be turned into UTF-8:
self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
# Unicode, Dammit thinks the whole document is Windows-1252,
# and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
# But if we run it through fix_embedded_windows_1252, it's fixed:
fixed = UnicodeDammit.detwingle(doc)
self.assertEqual(
"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))
def test_detwingle_ignores_multibyte_characters(self):
# Each of these characters has a UTF-8 representation ending
# in \x93. \x93 is a smart quote if interpreted as
# Windows-1252. But our code knows to skip over multibyte
# UTF-8 characters, so they'll survive the process unscathed.
for tricky_unicode_char in (
"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
):
input = tricky_unicode_char.encode("utf8")
self.assertTrue(input.endswith(b'\x93'))
output = UnicodeDammit.detwingle(input)
self.assertEqual(output, input)
def test_find_declared_encoding(self):
# Test our ability to find a declared encoding inside an
# XML or HTML document.
#
# Even if the document comes in as Unicode, it may be
# interesting to know what encoding was claimed
# originally.
html_unicode = '<html><head><meta charset="utf-8"></head></html>'
html_bytes = html_unicode.encode("ascii")
xml_unicode= '<?xml version="1.0" encoding="ISO-8859-1" ?>'
xml_bytes = xml_unicode.encode("ascii")
m = EncodingDetector.find_declared_encoding
self.assertEqual(None, m(html_unicode, is_html=False))
self.assertEqual("utf-8", m(html_unicode, is_html=True))
self.assertEqual("utf-8", m(html_bytes, is_html=True))
self.assertEqual("iso-8859-1", m(xml_unicode))
self.assertEqual("iso-8859-1", m(xml_bytes))
# Normally, only the first few kilobytes of a document are checked for
# an encoding.
spacer = b' ' * 5000
self.assertEqual(None, m(spacer + html_bytes))
self.assertEqual(None, m(spacer + xml_bytes))
# But you can tell find_declared_encoding to search an entire
# HTML document.
self.assertEqual(
"utf-8",
m(spacer + html_bytes, is_html=True, search_entire_document=True)
)
# The XML encoding declaration has to be the very first thing
# in the document. We'll allow whitespace before the document
# starts, but nothing else.
self.assertEqual(
"iso-8859-1",
m(xml_bytes, search_entire_document=True)
)
self.assertEqual(
None, m(b'a' + xml_bytes, search_entire_document=True)
)
class TestNamespacedAttribute(SoupTest):
def test_name_may_be_none_or_missing(self):
a = NamespacedAttribute("xmlns", None)
self.assertEqual(a, "xmlns")
a = NamespacedAttribute("xmlns")
self.assertEqual(a, "xmlns")
def test_attribute_is_equivalent_to_colon_separated_string(self):
a = NamespacedAttribute("a", "b")
self.assertEqual("a:b", a)
def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
a = NamespacedAttribute("a", "b", "c")
b = NamespacedAttribute("a", "b", "c")
self.assertEqual(a, b)
# The actual namespace is not considered.
c = NamespacedAttribute("a", "b", None)
self.assertEqual(a, c)
# But name and prefix are important.
d = NamespacedAttribute("a", "z", "c")
self.assertNotEqual(a, d)
e = NamespacedAttribute("z", "b", "c")
self.assertNotEqual(a, e)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    def test_charset_meta_attribute_value(self):
value = CharsetMetaAttributeValue("euc-jp")
self.assertEqual("euc-jp", value)
self.assertEqual("euc-jp", value.original_value)
self.assertEqual("utf8", value.encode("utf8"))
def test_content_meta_attribute_value(self):
value = ContentMetaAttributeValue("text/html; charset=euc-jp")
self.assertEqual("text/html; charset=euc-jp", value)
self.assertEqual("text/html; charset=euc-jp", value.original_value)
self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
| mdworks2016/work_development | Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/bs4/tests/test_soup.py | Python | apache-2.0 | 28,802 |
import logging
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
def is_positive_number(value):
    """Return True only if ``value`` coerces to an int greater than zero."""
    try:
        value = int(value)
        return value > 0
    except (TypeError, ValueError) as e:
        logger.exception(e)
        return False
| willandskill/bacman | bacman/utils.py | Python | bsd-3-clause | 383 |
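A quick usage sketch for the helper above, including the failure path, which logs the exception and returns False.

assert is_positive_number(5) is True
assert is_positive_number('42') is True
assert is_positive_number(-1) is False
assert is_positive_number('not-a-number') is False  # logged via logger.exception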
#!/usr/bin/env python
# coding: utf-8
import json
import os
import unittest
class TestGeohex(unittest.TestCase):
def setUp(self):
with open(os.path.join(os.path.dirname(__file__), 'hex_v3.2_test_code2HEX.json'), 'r') as f:
self.tests = json.load(f)
def test_consts(self):
from geohex import geohex
self.assertEqual(geohex.h_deg, 0.5235987755982988)
self.assertAlmostEqual(geohex.h_k, 0.5773502691896257, 15)
def test_calc_hex_size(self):
from geohex import geohex
cases = [
247376.6461728395,
82458.88205761317,
27486.29401920439,
9162.098006401464,
3054.0326688004875,
1018.0108896001626,
339.3369632000542,
113.11232106668473,
37.70410702222824,
12.56803567407608,
4.189345224692027,
1.3964484082306756,
0.4654828027435586,
0.15516093424785285,
0.05172031141595095,
0.017240103805316983,
0.005746701268438995
]
for idx, case in enumerate(cases):
self.assertAlmostEqual(geohex.calc_hex_size(idx + 1), case, 15)
def test_loc2xy(self):
from geohex import geohex
xy = geohex.loc2xy(139.745433, 35.65858)
self.assertEqual(xy['x'], 15556390.440080063)
self.assertEqual(xy['y'], 4253743.631945749)
def test_xy2loc(self):
from geohex import geohex
loc = geohex.xy2loc(15556390.440080063, 4253743.631945749)
self.assertEqual(loc['lon'], 139.745433)
self.assertEqual(loc['lat'], 35.65858)
def test_adjust_xy(self):
from geohex import geohex
self.assertEqual(geohex.adjust_xy(15556390.440080063, 4253743.631945749, 1), {'x': 15556363.440080062, 'y': 4253770.63194575, 'rev': 0})
self.assertEqual(geohex.adjust_xy(15556390.440080063, 4253743.631945749, 17), {'x': 15556390.440080063, 'y': 4253743.631945749, 'rev': 0})
def test_get_xy_by_location(self):
from geohex import geohex
xy = geohex.get_xy_by_location(35.65858, 139.745433, 11)
self.assertEqual(xy['x'], 912000)
self.assertEqual(xy['y'], -325774)
def test_get_zone_by_xy(self):
from geohex import geohex
zone = geohex.get_zone_by_xy(912000, -325774, 11)
self.assertEqual(zone.code, 'XM48854457273')
self.assertAlmostEqual(zone.lat, 35.658618718910624, 11)
self.assertAlmostEqual(zone.lon, 139.7454091799466, 11)
self.assertEqual(zone.x, 912000)
self.assertEqual(zone.y, -325774)
def test_get_xy_by_code(self):
from geohex import geohex
xy = geohex.get_xy_by_code('XM48854457273')
self.assertEqual(xy, {'x': 912000, 'y': -325774})
def test_get_zone_by_location(self):
from geohex import geohex
zone = geohex.get_zone_by_location(35.65858, 139.745433, 11)
self.assertIsInstance(zone, geohex.Zone)
self.assertEqual(zone.get_hex_size(), 4.189345224692027)
self.assertEqual(zone.get_level(), 11)
for geohex_val, latitude, longitude in self.tests:
zone = geohex.get_zone_by_location(latitude, longitude, len(geohex_val))
self.assertTrue(zone.code.startswith(geohex_val), msg="geohex code=%s, zone=%s" % (geohex_val, zone))
# ValueError
self.assertRaises(ValueError, geohex.get_zone_by_location, 35.65858, 139.745433, -1)
self.assertRaises(TypeError, geohex.get_zone_by_location, 35.65858, 139.745433, '1')
def test_get_zone_by_code(self):
from geohex import geohex
zone = geohex.get_zone_by_code('XM48854457273')
self.assertEqual(zone.get_level(), 11)
self.assertEqual(zone.get_hex_size(), 4.189345224692027)
self.assertEqual(zone.x, 912000)
self.assertEqual(zone.y, -325774)
self.assertAlmostEqual(zone.lat, 35.658618718910624)
self.assertAlmostEqual(zone.lon, 139.7454091799466)
for geohex_val, latitude, longitude in self.tests:
zone = geohex.get_zone_by_code(geohex_val)
self.assertAlmostEqual(latitude, zone.lat, msg="geohex code=%s" % geohex_val)
self.assertAlmostEqual(longitude, zone.lon, msg="geohex code=%s" % geohex_val)
def suite():
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(TestGeohex))
return suite
| uncovertruth/py-geohex3 | test/geohex_test.py | Python | mit | 4,461 |
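A sketch for executing the suite() defined above directly (e.g. python test/geohex_test.py) instead of through a test runner; unittest is already imported at the top of the file.

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())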
import unittest
from constants import *
from wow import *
from util import *
class BaseTest(unittest.TestCase):
def test_for_normal_query_split(self):
# Tests to ensure that the query gets split properly when the bot gets a message.
# Example query: '!armory pve/pvp <name> <realm> <region>'
sample_query = '!armory pve jimo burning-legion us'
self.assertEqual(split_query(sample_query, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
def test_for_url_query_split(self):
# Tests to ensure that the query string gets split properly when the bot gets a url based message.
# Example query: '!armory pve/pvp <armory-link> <region>' (Accepts either a world of warcraft or battle net link)
sample_wow_url = '!armory pve https://worldofwarcraft.com/en-us/character/burning-legion/jimo us'
sample_battlenet_url = '!armory pve http://us.battle.net/wow/en/character/burning-legion/jimo/advanced us'
self.assertEqual(split_query(sample_wow_url, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
self.assertEqual(split_query(sample_battlenet_url, 'pvp'), ['jimo', 'burning-legion', 'pvp', 'us'])
def test_for_warrior_class(self):
# Makes sure that when the id for the Warrior class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARRIOR),
{'colour': 0xC79C6E, 'name': 'Warrior'})
def test_for_paladin_class(self):
# Makes sure that when the id for the Paladin class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PALADIN),
{'colour': 0xF58CBA, 'name': 'Paladin'})
def test_for_hunter_class(self):
# Makes sure that when the id for the Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_HUNTER),
{'colour': 0xABD473, 'name': 'Hunter'})
def test_for_rogue_class(self):
# Makes sure that when the id for the Rogue class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_ROGUE),
{'colour': 0xFFF569, 'name': 'Rogue'})
def test_for_priest_class(self):
# Makes sure that when the id for the Priest class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PRIEST),
{'colour': 0xFFFFFF, 'name': 'Priest'})
def test_for_death_knight_class(self):
# Makes sure that when the id for the Death Knight class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEATH_KNIGHT),
{'colour': 0xC41F3B, 'name': 'Death Knight'})
def test_for_shaman_class(self):
# Makes sure that when the id for the Shaman class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_SHAMAN),
{'colour': 0x0070DE, 'name': 'Shaman'})
def test_for_mage_class(self):
# Makes sure that when the id for the Mage class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MAGE),
{'colour': 0x69CCF0, 'name': 'Mage'})
def test_for_warlock_class(self):
# Makes sure that when the id for the Warlock class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARLOCK),
{'colour': 0x9482C9, 'name': 'Warlock'})
def test_for_monk_class(self):
# Makes sure that when the id for the Monk class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MONK),
{'colour': 0x00FF96, 'name': 'Monk'})
def test_for_druid_class(self):
# Makes sure that when the id for the Druid class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DRUID),
{'colour': 0xFF7D0A, 'name': 'Druid'})
def test_for_demon_hunter_class(self):
# Makes sure that when the id for the Demon Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEMON_HUNTER),
{'colour': 0xA330C9, 'name': 'Demon Hunter'})
def test_for_faction_name(self):
# Makes sure that when the id for either the Horde or Alliance faction is
        # passed we get the correct name in return.
self.assertEqual(faction_details(FACTION_ALLIANCE), 'Alliance')
self.assertEqual(faction_details(FACTION_HORDE), 'Horde')
def test_for_achievement_progress(self):
# Passes in some mock API data and expects it to return as completed.
# Tests for accuracy on each id check, not API data.
self.maxDiff = None
input_data_horde_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5356, 5353, 5349, 11191, 11192, 11874]
}
}
input_data_alliance_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5343, 5339, 5334, 11192, 11874, 11875]
}
}
expected_horde_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_HIGH_WARLORD_NAME,
'rbg_2000_name': AC_CHAMPION_NAME,
'rbg_1500_name': AC_FIRST_SERGEANT_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Cutting Edge',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Ahead of the Curve'
}
expected_alliance_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_GRAND_MARSHALL_NAME,
'rbg_2000_name': AC_LIEAUTENANT_COMMANDER_NAME,
'rbg_1500_name': AC_SERGEANT_MAJOR_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Ahead of the Curve',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Cutting Edge'
}
self.assertEqual(character_achievements(input_data_horde_sample, 'Horde'), expected_horde_data)
self.assertEqual(character_achievements(input_data_alliance_sample, 'Alliance'), expected_alliance_data)
def test_pvp_progression(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
self.maxDiff = None
sample_data = {
"pvp": {
"brackets": {
"ARENA_BRACKET_2v2": {
"rating": 5928,
},
"ARENA_BRACKET_3v3": {
"rating": 1858,
},
"ARENA_BRACKET_RBG": {
"rating": 5999,
},
"ARENA_BRACKET_2v2_SKIRMISH": {
"rating": 2985,
}
}
},
"totalHonorableKills": 888399
}
expected_data = {
'2v2': 5928,
'2v2s': 2985,
'3v3': 1858,
'rbg': 5999,
'kills': 888399
}
self.assertEqual(character_arena_progress(sample_data), expected_data)
def test_pve_progression(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
self.maxDiff = None
sample_data = {
"progression": {
"raids": [
{
"id": 8026,
"bosses": [{
"lfrKills": 19,
"normalKills": 8,
"heroicKills": 5,
"mythicKills": 3,
},
{
"lfrKills": 3,
"normalKills": 7,
"heroicKills": 3,
"mythicKills": 2,
}]
},
{
"id": 8440,
"bosses": [{
"lfrKills": 7,
"normalKills": 1,
"heroicKills": 1,
"mythicKills": 0,
}]
},
{
"id": 8524,
"bosses": [{
"lfrKills": 3,
"normalKills": 2,
"heroicKills": 4,
"mythicKills": 1,
}]
},
{
"id": 8025,
"bosses": [{
"lfrKills": 3,
"normalKills": 2,
"heroicKills": 1,
"mythicKills": 0,
},
{
"lfrKills": 5,
"normalKills": 2,
"heroicKills": 2,
"mythicKills": 0,
}]
}]
}
}
expected_data = {
'emerald_nightmare':{
'lfr':2,
'normal':2,
'heroic':2,
'mythic':2,
'bosses':2
},
'trial_of_valor':{
'lfr':1,
'normal':1,
'heroic':1,
'mythic':0,
'bosses':1
},
'the_nighthold':{
'lfr':2,
'normal':2,
'heroic':2,
'mythic':0,
'bosses':2
},
'tomb_of_sargeras': {
'lfr':1,
'normal':1,
'heroic':1,
'mythic':1,
'bosses':1
}
}
self.assertEqual(character_progression(sample_data), expected_data)
def test_player_talents(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
sample_data = {
'talents': [
{
'selected':True,
'spec':{
'name':'Holy',
'role':'HEALING'
}
},
{
'spec':{
'name':'Shadow',
'role': 'DAMAGE'
}
},
{
'spec':{
'name':'Discipline',
'role':'HEALING'
}
}
]}
expected_data = {
'active_spec': 'Holy'
}
self.assertEqual(character_talents(sample_data), expected_data)
if __name__ == '__main__':
unittest.main()
| remond-andre/discord-wow-armory-bot-modified | tests.py | Python | mit | 12,125 |
"""Python package version query tools."""
import logging
import json
import pathlib
from .version import Version
_LOG = logging.getLogger(__name__)
def query_metadata_json(path: pathlib.Path) -> Version:
"""Get version from metadata.json file."""
with path.open('r') as metadata_file:
metadata = json.load(metadata_file)
version_str = metadata['version']
return Version.from_str(version_str)
def query_pkg_info(path: pathlib.Path) -> Version:
"""Get version from PKG-INFO file."""
with path.open('r') as pkginfo_file:
for line in pkginfo_file:
if line.startswith('Version:'):
version_str = line.replace('Version:', '').strip()
return Version.from_str(version_str)
raise ValueError(path)
def query_package_folder(path: pathlib.Path, search_parent_directories: bool = False) -> Version:
"""Get version from Python package folder."""
paths = [path]
if search_parent_directories:
paths += path.parents
metadata_json_paths, pkg_info_paths = None, None
for pth in paths:
metadata_json_paths = list(pth.parent.glob(f'{pth.name}*.dist-info/metadata.json'))
pkg_info_paths = list(pth.parent.glob(f'{pth.name}*.egg-info/PKG-INFO'))
pkg_info_paths += list(pth.parent.glob(f'{pth.name}*.dist-info/METADATA'))
if len(metadata_json_paths) == 1 and not pkg_info_paths:
return query_metadata_json(metadata_json_paths[0])
if not metadata_json_paths and len(pkg_info_paths) == 1:
return query_pkg_info(pkg_info_paths[0])
raise ValueError(paths, metadata_json_paths, pkg_info_paths)
| mbdevpl/version-query | version_query/py_query.py | Python | apache-2.0 | 1,657 |
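A usage sketch for query_package_folder above; the package path is hypothetical, and the function raises ValueError when it cannot locate exactly one metadata.json or PKG-INFO/METADATA source for the folder.

import pathlib

pkg = pathlib.Path('/usr/lib/python3/dist-packages/requests')  # hypothetical path
try:
    print(query_package_folder(pkg))
except ValueError:
    print('no unambiguous .dist-info / .egg-info metadata found')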
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
import haystack
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, EmptyResults, log_query
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
from haystack.exceptions import MissingDependency, MoreLikeThisError, SkipDocument
from haystack.inputs import Clean, Exact, PythonData, Raw
from haystack.models import SearchResult
from haystack.utils import log as logging
from haystack.utils import get_identifier, get_model_ct
from haystack.utils.app_loading import haystack_get_model
try:
from pysolr import Solr, SolrError
except ImportError:
raise MissingDependency("The 'solr' backend requires the installation of 'pysolr'. Please refer to the documentation.")
class SolrSearchBackend(BaseSearchBackend):
    # Words reserved by Solr for special use.
RESERVED_WORDS = (
'AND',
'NOT',
'OR',
'TO',
)
# Characters reserved by Solr for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
'\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
'[', ']', '^', '"', '~', '*', '?', ':', '/',
)
def __init__(self, connection_alias, **connection_options):
super(SolrSearchBackend, self).__init__(connection_alias, **connection_options)
if 'URL' not in connection_options:
raise ImproperlyConfigured("You must specify a 'URL' in your settings for connection '%s'." % connection_alias)
self.collate = connection_options.get('COLLATE_SPELLING', True)
self.conn = Solr(connection_options['URL'], timeout=self.timeout,
**connection_options.get('KWARGS', {}))
self.log = logging.getLogger('haystack')
def update(self, index, iterable, commit=True):
docs = []
for obj in iterable:
try:
docs.append(index.full_prepare(obj))
except SkipDocument:
self.log.debug(u"Indexing for object `%s` skipped", obj)
except UnicodeDecodeError:
if not self.silently_fail:
raise
# We'll log the object identifier but won't include the actual object
# to avoid the possibility of that generating encoding errors while
# processing the log message:
self.log.error(u"UnicodeDecodeError while preparing object for update", exc_info=True,
extra={"data": {"index": index,
"object": get_identifier(obj)}})
if len(docs) > 0:
try:
self.conn.add(docs, commit=commit, boost=index.get_field_weights())
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Solr: %s", e, exc_info=True)
def remove(self, obj_or_string, commit=True):
solr_id = get_identifier(obj_or_string)
try:
kwargs = {
'commit': commit,
'id': solr_id
}
self.conn.delete(**kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to remove document '%s' from Solr: %s", solr_id, e, exc_info=True)
def clear(self, models=None, commit=True):
if models is not None:
assert isinstance(models, (list, tuple))
try:
if models is None:
# *:* matches all docs in Solr
self.conn.delete(q='*:*', commit=commit)
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
self.conn.delete(q=" OR ".join(models_to_delete), commit=commit)
if commit:
# Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99
self.conn.optimize()
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
if models is not None:
self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e,
exc_info=True)
else:
self.log.error("Failed to clear Solr index: %s", e, exc_info=True)
@log_query
def search(self, query_string, **kwargs):
if len(query_string) == 0:
return {
'results': [],
'hits': 0,
}
search_kwargs = self.build_search_kwargs(query_string, **kwargs)
try:
raw_results = self.conn.search(query_string, **search_kwargs)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to query Solr using '%s': %s", query_string, e, exc_info=True)
raw_results = EmptyResults()
return self._process_results(raw_results,
highlight=kwargs.get('highlight'),
result_class=kwargs.get('result_class', SearchResult),
distance_point=kwargs.get('distance_point'))
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, stats=None, collate=None,
**extra_kwargs):
index = haystack.connections[self.connection_alias].get_unified_index()
kwargs = {
'fl': '* score',
'df': index.document_field,
}
if fields:
if isinstance(fields, (list, set)):
fields = " ".join(fields)
kwargs['fl'] = fields
if sort_by is not None:
if sort_by in ['distance asc', 'distance desc'] and distance_point:
# Do the geo-enabled sort.
lng, lat = distance_point['point'].coords
kwargs['sfield'] = distance_point['field']
kwargs['pt'] = '%s,%s' % (lat, lng)
if sort_by == 'distance asc':
kwargs['sort'] = 'geodist() asc'
else:
kwargs['sort'] = 'geodist() desc'
else:
if sort_by.startswith('distance '):
warnings.warn("In order to sort by distance, you must call the '.distance(...)' method.")
# Regular sorting.
kwargs['sort'] = sort_by
if start_offset is not None:
kwargs['start'] = start_offset
if end_offset is not None:
kwargs['rows'] = end_offset - start_offset
if highlight:
# `highlight` can either be True or a dictionary containing custom parameters
# which will be passed to the backend and may override our default settings:
kwargs['hl'] = 'true'
kwargs['hl.fragsize'] = '200'
if isinstance(highlight, dict):
# autoprefix highlighter options with 'hl.', all of them start with it anyway
# this makes option dicts shorter: {'maxAnalyzedChars': 42}
# and lets some of options be used as keyword arguments: `.highlight(preserveMulti=False)`
kwargs.update({
key if key.startswith("hl.") else ('hl.' + key): highlight[key]
for key in highlight.keys()
})
if collate is None:
collate = self.collate
if self.include_spelling is True:
kwargs['spellcheck'] = 'true'
kwargs['spellcheck.collate'] = str(collate).lower()
kwargs['spellcheck.count'] = 1
if spelling_query:
kwargs['spellcheck.q'] = spelling_query
if facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.field'] = facets.keys()
for facet_field, options in facets.items():
for key, value in options.items():
kwargs['f.%s.facet.%s' % (facet_field, key)] = self.conn._from_python(value)
if date_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.date'] = date_facets.keys()
kwargs['facet.date.other'] = 'none'
for key, value in date_facets.items():
kwargs["f.%s.facet.date.start" % key] = self.conn._from_python(value.get('start_date'))
kwargs["f.%s.facet.date.end" % key] = self.conn._from_python(value.get('end_date'))
gap_by_string = value.get('gap_by').upper()
gap_string = "%d%s" % (value.get('gap_amount'), gap_by_string)
if value.get('gap_amount') != 1:
gap_string += "S"
kwargs["f.%s.facet.date.gap" % key] = '+%s/%s' % (gap_string, gap_by_string)
if query_facets is not None:
kwargs['facet'] = 'on'
kwargs['facet.query'] = ["%s:%s" % (field, value) for field, value in query_facets]
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if narrow_queries is not None:
kwargs['fq'] = list(narrow_queries)
if stats:
kwargs['stats'] = "true"
for k in stats.keys():
kwargs['stats.field'] = k
for facet in stats[k]:
kwargs['f.%s.stats.facet' % k] = facet
if within is not None:
from haystack.utils.geo import generate_bounding_box
kwargs.setdefault('fq', [])
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(within['point_1'], within['point_2'])
# Bounding boxes are min, min TO max, max. Solr's wiki was *NOT*
# very clear on this.
bbox = '%s:[%s,%s TO %s,%s]' % (within['field'], min_lat, min_lng, max_lat, max_lng)
kwargs['fq'].append(bbox)
if dwithin is not None:
kwargs.setdefault('fq', [])
lng, lat = dwithin['point'].coords
geofilt = '{!geofilt pt=%s,%s sfield=%s d=%s}' % (lat, lng, dwithin['field'], dwithin['distance'].km)
kwargs['fq'].append(geofilt)
# Check to see if the backend should try to include distances
# (Solr 4.X+) in the results.
if self.distance_available and distance_point:
# In early testing, you can't just hand Solr 4.X a proper bounding box
# & request distances. To enable native distance would take calculating
# a center point & a radius off the user-provided box, which kinda
# sucks. We'll avoid it for now, since Solr 4.x's release will be some
# time yet.
# kwargs['fl'] += ' _dist_:geodist()'
pass
if extra_kwargs:
kwargs.update(extra_kwargs)
return kwargs
def more_like_this(self, model_instance, additional_query_string=None,
start_offset=0, end_offset=None, models=None,
limit_to_registered_models=None, result_class=None, **kwargs):
from haystack import connections
# Deferred models will have a different class ("RealClass_Deferred_fieldname")
# which won't be in our registry:
model_klass = model_instance._meta.concrete_model
index = connections[self.connection_alias].get_unified_index().get_index(model_klass)
field_name = index.get_content_field()
params = {
'fl': '*,score',
}
if start_offset is not None:
params['start'] = start_offset
if end_offset is not None:
params['rows'] = end_offset
narrow_queries = set()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if models and len(models):
model_choices = sorted(get_model_ct(model) for model in models)
elif limit_to_registered_models:
# Using narrow queries, limit the results to only models handled
# with the current routers.
model_choices = self.build_models_list()
else:
model_choices = []
if len(model_choices) > 0:
if narrow_queries is None:
narrow_queries = set()
narrow_queries.add('%s:(%s)' % (DJANGO_CT, ' OR '.join(model_choices)))
if additional_query_string:
narrow_queries.add(additional_query_string)
if narrow_queries:
params['fq'] = list(narrow_queries)
query = "%s:%s" % (ID, get_identifier(model_instance))
try:
raw_results = self.conn.more_like_this(query, field_name, **params)
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
self.log.error("Failed to fetch More Like This from Solr for document '%s': %s",
query, e, exc_info=True)
raw_results = EmptyResults()
return self._process_results(raw_results, result_class=result_class)
def _process_results(self, raw_results, highlight=False, result_class=None, distance_point=None):
from haystack import connections
results = []
hits = raw_results.hits
facets = {}
stats = {}
spelling_suggestion = spelling_suggestions = None
if result_class is None:
result_class = SearchResult
if hasattr(raw_results, 'stats'):
stats = raw_results.stats.get('stats_fields', {})
if hasattr(raw_results, 'facets'):
facets = {
'fields': raw_results.facets.get('facet_fields', {}),
'dates': raw_results.facets.get('facet_dates', {}),
'queries': raw_results.facets.get('facet_queries', {}),
}
for key in ['fields']:
for facet_field in facets[key]:
# Convert to a two-tuple, as Solr's json format returns a list of
# pairs.
facets[key][facet_field] = list(zip(facets[key][facet_field][::2],
facets[key][facet_field][1::2]))
if self.include_spelling and hasattr(raw_results, 'spellcheck'):
try:
spelling_suggestions = self.extract_spelling_suggestions(raw_results)
except Exception as exc:
self.log.error('Error extracting spelling suggestions: %s', exc, exc_info=True,
extra={'data': {'spellcheck': raw_results.spellcheck}})
if not self.silently_fail:
raise
spelling_suggestions = None
if spelling_suggestions:
# Maintain compatibility with older versions of Haystack which returned a single suggestion:
spelling_suggestion = spelling_suggestions[-1]
assert isinstance(spelling_suggestion, six.string_types)
else:
spelling_suggestion = None
unified_index = connections[self.connection_alias].get_unified_index()
indexed_models = unified_index.get_indexed_models()
for raw_result in raw_results.docs:
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = haystack_get_model(app_label, model_name)
if model and model in indexed_models:
index = unified_index.get_index(model)
index_field_map = index.field_map
for key, value in raw_result.items():
string_key = str(key)
# re-map key if alternate name used
if string_key in index_field_map:
string_key = index_field_map[key]
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = self.conn._to_python(value)
del(additional_fields[DJANGO_CT])
del(additional_fields[DJANGO_ID])
del(additional_fields['score'])
if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
if distance_point:
additional_fields['_point_of_origin'] = distance_point
if raw_result.get('__dist__'):
from haystack.utils.geo import Distance
additional_fields['_distance'] = Distance(km=float(raw_result['__dist__']))
else:
additional_fields['_distance'] = None
result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], **additional_fields)
results.append(result)
else:
hits -= 1
return {
'results': results,
'hits': hits,
'stats': stats,
'facets': facets,
'spelling_suggestion': spelling_suggestion,
'spelling_suggestions': spelling_suggestions,
}
def extract_spelling_suggestions(self, raw_results):
        # The response format differs across legacy Solr, 6.4, and 6.5 (see e.g.
        # https://issues.apache.org/jira/browse/SOLR-3029); depending on the
        # version and configuration the response may be a dict of dicts,
        # a list of dicts, or a list of strings.
collations = raw_results.spellcheck.get('collations', None)
suggestions = raw_results.spellcheck.get('suggestions', None)
# We'll collect multiple suggestions here. For backwards
# compatibility with older versions of Haystack we'll still return
# only a single suggestion but in the future we can expose all of
# them.
spelling_suggestions = []
if collations:
if isinstance(collations, dict):
# Solr 6.5
collation_values = collations['collation']
if isinstance(collation_values, six.string_types):
collation_values = [collation_values]
elif isinstance(collation_values, dict):
# spellcheck.collateExtendedResults changes the format to a dictionary:
collation_values = [collation_values['collationQuery']]
elif isinstance(collations[1], dict):
# Solr 6.4
collation_values = collations
else:
# Older versions of Solr
collation_values = collations[-1:]
for i in collation_values:
# Depending on the options the values are either simple strings or dictionaries:
spelling_suggestions.append(i['collationQuery'] if isinstance(i, dict) else i)
elif suggestions:
if isinstance(suggestions, dict):
for i in suggestions.values():
for j in i['suggestion']:
if isinstance(j, dict):
spelling_suggestions.append(j['word'])
else:
spelling_suggestions.append(j)
elif isinstance(suggestions[0], six.string_types) and isinstance(suggestions[1], dict):
# Solr 6.4 uses a list of paired (word, dictionary) pairs:
for suggestion in suggestions:
if isinstance(suggestion, dict):
for i in suggestion['suggestion']:
if isinstance(i, dict):
spelling_suggestions.append(i['word'])
else:
spelling_suggestions.append(i)
else:
# Legacy Solr
spelling_suggestions.append(suggestions[-1])
return spelling_suggestions
def build_schema(self, fields):
content_field_name = ''
schema_fields = []
for field_name, field_class in fields.items():
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text_en',
'indexed': 'true',
'stored': 'true',
'multi_valued': 'false',
}
if field_class.document is True:
content_field_name = field_class.index_fieldname
# DRL_FIXME: Perhaps move to something where, if none of these
# checks succeed, call a custom method on the form that
# returns, per-backend, the right type of storage?
if field_class.field_type in ['date', 'datetime']:
field_data['type'] = 'date'
elif field_class.field_type == 'integer':
field_data['type'] = 'long'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
elif field_class.field_type == 'location':
field_data['type'] = 'location'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
if field_class.stored is False:
field_data['stored'] = 'false'
# Do this last to override `text` fields.
if field_class.indexed is False:
field_data['indexed'] = 'false'
# If it's text and not being indexed, we probably don't want
# to do the normal lowercase/tokenize/stemming/etc. dance.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
# If it's a ``FacetField``, make sure we don't postprocess it.
if hasattr(field_class, 'facet_for'):
# If it's text, it ought to be a string.
if field_data['type'] == 'text_en':
field_data['type'] = 'string'
schema_fields.append(field_data)
return (content_field_name, schema_fields)
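    # Illustrative (hedged) schema row produced above for a plain text field
    # indexed under the name 'title':
    #   {'field_name': 'title', 'type': 'text_en', 'indexed': 'true',
    #    'stored': 'true', 'multi_valued': 'false'}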
def extract_file_contents(self, file_obj, **kwargs):
"""Extract text and metadata from a structured file (PDF, MS Word, etc.)
Uses the Solr ExtractingRequestHandler, which is based on Apache Tika.
See the Solr wiki for details:
http://wiki.apache.org/solr/ExtractingRequestHandler
Due to the way the ExtractingRequestHandler is implemented it completely
replaces the normal Haystack indexing process with several unfortunate
restrictions: only one file per request, the extracted data is added to
the index with no ability to modify it, etc. To simplify the process and
allow for more advanced use we'll run using the extract-only mode to
return the extracted data without adding it to the index so we can then
use it within Haystack's normal templating process.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
try:
return self.conn.extract(file_obj, **kwargs)
except Exception as e:
self.log.warning(u"Unable to extract file contents: %s", e,
exc_info=True, extra={"data": {"file": file_obj}})
return None
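# A minimal sketch (not part of the original backend API) of driving the
# extraction above; ``backend`` is assumed to be an already-configured
# SolrSearchBackend, and the result keys come from the docstring above.
def _example_extract_text(backend, path):
    with open(path, 'rb') as fh:
        extracted = backend.extract_file_contents(fh)
    return None if extracted is None else extracted['contents']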
class SolrSearchQuery(BaseSearchQuery):
def matching_all_fragment(self):
return '*:*'
def build_query_fragment(self, field, filter_type, value):
from haystack import connections
query_frag = ''
if not hasattr(value, 'input_type_name'):
# Handle when we've got a ``ValuesListQuerySet``...
if hasattr(value, 'values_list'):
value = list(value)
if isinstance(value, six.string_types):
# It's not an ``InputType``. Assume ``Clean``.
value = Clean(value)
else:
value = PythonData(value)
# Prepare the query using the InputType.
prepared_value = value.prepare(self)
if not isinstance(prepared_value, (set, list, tuple)):
# Then convert whatever we get back to what pysolr wants if needed.
prepared_value = self.backend.conn._from_python(prepared_value)
# 'content' is a special reserved word, much like 'pk' in
# Django's ORM layer. It indicates 'no special field'.
if field == 'content':
index_fieldname = ''
else:
index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)
filter_types = {
'content': u'%s',
'contains': u'*%s*',
'endswith': u'*%s',
'startswith': u'%s*',
'exact': u'%s',
'gt': u'{%s TO *}',
'gte': u'[%s TO *]',
'lt': u'{* TO %s}',
'lte': u'[* TO %s]',
'fuzzy': u'%s~',
}
if value.post_process is False:
query_frag = prepared_value
else:
if filter_type in ['content', 'contains', 'startswith', 'endswith', 'fuzzy']:
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
# Iterate over terms & incorportate the converted form of each into the query.
terms = []
for possible_value in prepared_value.split(' '):
terms.append(filter_types[filter_type] % self.backend.conn._from_python(possible_value))
if len(terms) == 1:
query_frag = terms[0]
else:
query_frag = u"(%s)" % " AND ".join(terms)
elif filter_type == 'in':
in_options = []
if not prepared_value:
query_frag = u'(!*:*)'
else:
for possible_value in prepared_value:
in_options.append(u'"%s"' % self.backend.conn._from_python(possible_value))
query_frag = u"(%s)" % " OR ".join(in_options)
elif filter_type == 'range':
start = self.backend.conn._from_python(prepared_value[0])
end = self.backend.conn._from_python(prepared_value[1])
query_frag = u'["%s" TO "%s"]' % (start, end)
elif filter_type == 'exact':
if value.input_type_name == 'exact':
query_frag = prepared_value
else:
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
else:
if value.input_type_name != 'exact':
prepared_value = Exact(prepared_value).prepare(self)
query_frag = filter_types[filter_type] % prepared_value
if len(query_frag) and not isinstance(value, Raw):
if not query_frag.startswith('(') and not query_frag.endswith(')'):
query_frag = "(%s)" % query_frag
return u"%s%s" % (index_fieldname, query_frag)
def build_alt_parser_query(self, parser_name, query_string='', **kwargs):
if query_string:
query_string = Clean(query_string).prepare(self)
kwarg_bits = []
for key in sorted(kwargs.keys()):
if isinstance(kwargs[key], six.string_types) and ' ' in kwargs[key]:
kwarg_bits.append(u"%s='%s'" % (key, kwargs[key]))
else:
kwarg_bits.append(u"%s=%s" % (key, kwargs[key]))
return u'_query_:"{!%s %s}%s"' % (parser_name, Clean(' '.join(kwarg_bits)), query_string)
def build_params(self, spelling_query=None, **kwargs):
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class
}
order_by_list = None
if self.order_by:
if order_by_list is None:
order_by_list = []
for order_by in self.order_by:
if order_by.startswith('-'):
order_by_list.append('%s desc' % order_by[1:])
else:
order_by_list.append('%s asc' % order_by)
search_kwargs['sort_by'] = ", ".join(order_by_list)
if self.date_facets:
search_kwargs['date_facets'] = self.date_facets
if self.distance_point:
search_kwargs['distance_point'] = self.distance_point
if self.dwithin:
search_kwargs['dwithin'] = self.dwithin
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset
if self.facets:
search_kwargs['facets'] = self.facets
if self.fields:
search_kwargs['fields'] = self.fields
if self.highlight:
search_kwargs['highlight'] = self.highlight
if self.models:
search_kwargs['models'] = self.models
if self.narrow_queries:
search_kwargs['narrow_queries'] = self.narrow_queries
if self.query_facets:
search_kwargs['query_facets'] = self.query_facets
if self.within:
search_kwargs['within'] = self.within
if spelling_query:
search_kwargs['spelling_query'] = spelling_query
elif self.spelling_query:
search_kwargs['spelling_query'] = self.spelling_query
if self.stats:
search_kwargs['stats'] = self.stats
return search_kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query, **kwargs)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
self._stats = results.get('stats', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
additional_query_string = self.build_query()
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class,
'models': self.models
}
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset - self.start_offset
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
class SolrEngine(BaseEngine):
backend = SolrSearchBackend
query = SolrSearchQuery
| celerityweb/django-haystack | haystack/backends/solr_backend.py | Python | bsd-3-clause | 33,372 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
from mock import MagicMock, patch, PropertyMock
# External imports
# Bokeh imports
from bokeh.document.document import Document
from bokeh.io.state import State
# Module under test
import bokeh.io.notebook as binb
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_install_notebook_hook():
binb.install_notebook_hook("foo", "load", "doc", "app")
assert binb._HOOKS["foo"]['load'] == "load"
assert binb._HOOKS["foo"]['doc'] == "doc"
assert binb._HOOKS["foo"]['app'] == "app"
with pytest.raises(RuntimeError):
binb.install_notebook_hook("foo", "load2", "doc2", "app2")
binb.install_notebook_hook("foo", "load2", "doc2", "app2", overwrite=True)
assert binb._HOOKS["foo"]['load'] == "load2"
assert binb._HOOKS["foo"]['doc'] == "doc2"
assert binb._HOOKS["foo"]['app'] == "app2"
@patch('bokeh.io.notebook.get_comms')
@patch('bokeh.io.notebook.publish_display_data')
@patch('bokeh.embed.notebook.notebook_content')
def test_show_doc_no_server(mock_notebook_content,
mock__publish_display_data,
mock_get_comms):
mock_get_comms.return_value = "comms"
s = State()
d = Document()
mock_notebook_content.return_value = ["notebook_script", "notebook_div", d]
class Obj(object):
id = None
def references(self): return []
assert mock__publish_display_data.call_count == 0
binb.show_doc(Obj(), s, True)
expected_args = ({'application/javascript': 'notebook_script', 'application/vnd.bokehjs_exec.v0+json': ''},)
expected_kwargs = {'metadata': {'application/vnd.bokehjs_exec.v0+json': {'id': None}}}
assert d._hold is not None
assert mock__publish_display_data.call_count == 2 # two mime types
assert mock__publish_display_data.call_args[0] == expected_args
assert mock__publish_display_data.call_args[1] == expected_kwargs
class Test_push_notebook(object):
@patch('bokeh.io.notebook.CommsHandle.comms', new_callable=PropertyMock)
def test_no_events(self, mock_comms):
mock_comms.return_value = MagicMock()
d = Document()
handle = binb.CommsHandle("comms", d)
binb.push_notebook(d, None, handle)
assert mock_comms.call_count == 0
@patch('bokeh.io.notebook.CommsHandle.comms', new_callable=PropertyMock)
def test_with_events(self, mock_comms):
mock_comm = MagicMock()
mock_send = MagicMock(return_value="junk")
mock_comm.send = mock_send
mock_comms.return_value = mock_comm
d = Document()
handle = binb.CommsHandle("comms", d)
d.title = "foo"
binb.push_notebook(d, None, handle)
assert mock_comms.call_count > 0
assert mock_send.call_count == 3 # sends header, metadata, then content
assert json.loads(mock_send.call_args[0][0]) == {u"events": [{u"kind": u"TitleChanged", u"title": u"foo"}], u"references": []}
assert mock_send.call_args[1] == {}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def test__origin_url():
assert binb._origin_url("foo.com:8888") == "foo.com:8888"
assert binb._origin_url("http://foo.com:8888") == "foo.com:8888"
assert binb._origin_url("https://foo.com:8888") == "foo.com:8888"
def test__server_url():
assert binb._server_url("foo.com:8888", 10) == "http://foo.com:10/"
assert binb._server_url("http://foo.com:8888", 10) == "http://foo.com:10/"
assert binb._server_url("https://foo.com:8888", 10) == "https://foo.com:10/"
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/io/tests/test_notebook.py | Python | bsd-3-clause | 5,073 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutDecoratingWithFunctions(Koan):
def addcowbell(fn):
fn.wow_factor = 'COWBELL BABY!'
return fn
@addcowbell
def mediocre_song(self):
return "o/~ We all live in a broken submarine o/~"
def test_decorators_can_modify_a_function(self):
self.assertMatch(__, self.mediocre_song())
self.assertEqual(__, self.mediocre_song.wow_factor)
# ------------------------------------------------------------------
def xmltag(fn):
def func(*args):
return '<' + fn(*args) + '/>'
return func
@xmltag
def render_tag(self, name):
return name
def test_decorators_can_change_a_function_output(self):
self.assertEqual(__, self.render_tag('llama'))
| Krakn/learning | src/python/python_koans/python2/about_decorating_with_functions.py | Python | isc | 832 |
from .cursor import Cursor
from collections import namedtuple
from itertools import zip_longest
import logging
class BasicSpan(object):
Range = namedtuple('Range', 'y line start_x end_x')
def __init__(self, start_curs): #, end_curs):
#assert isinstance(start_curs, Cursor)
#assert isinstance(end_curs, Cursor)
#start_curs = start_curs.clone()
#end_curs = end_curs.clone()
#if end_curs.pos < start_curs.pos:
# start_curs, end_curs = end_curs, start_curs
self.start_curs = start_curs
#self.end_curs = end_curs
@property
def buffer(self):
return self.start_curs.buffer
def __contains__(self, pos):
if isinstance(pos, Cursor):
pos = pos.pos
return self.start_curs.pos <= pos < self.end_curs.pos
def contains_inclusive(self, pos):
if isinstance(pos, Cursor):
pos = pos.pos
return self.start_curs.pos <= pos <= self.end_curs.pos
@property
def ranges(self):
start_curs, end_curs = self.start_curs, self.end_curs
start_curs_y, start_curs_x = start_curs.pos
end_curs_y, end_curs_x = end_curs.pos
for y, line in enumerate(start_curs.buffer.lines[start_curs_y:end_curs_y+1], start_curs_y):
start_x = start_curs_x if y == start_curs_y else 0
end_x = end_curs_x if y == end_curs_y else None
yield self.Range(y, line, start_x, end_x)
@property
def region(self):
return Region(self)
def set_attributes(self, **kw):
for key, val in kw.items():
self.start_curs.set_attribute_to(self.end_curs, key, val)
def __sub__(self, other):
return self.region - other.region
def __repr__(self):
return 'Span({!r}, {!r})'.format(self.start_curs, self.end_curs)
def __eq__(self, other):
return (self.start_curs.pos, self.end_curs.pos) == (other.start_curs.pos, other.end_curs.pos)
@property
def text(self):
return self.start_curs.text_to(self.end_curs)
@text.setter
def text(self, value):
self.start_curs.remove_to(self.end_curs)
p = self.start_curs.pos
self.start_curs.insert(value)
self.start_curs.pos = p
def remove(self):
self.text = ''
class Span(BasicSpan):
def __init__(self, start_curs, end_curs):
assert isinstance(start_curs, Cursor)
assert isinstance(end_curs, Cursor)
start_curs = start_curs.clone()
end_curs = end_curs.clone()
if end_curs.pos < start_curs.pos:
start_curs, end_curs = end_curs, start_curs
super().__init__(start_curs)
self.end_curs = end_curs
@classmethod
def from_pos(cls, buff, pos, *, length=None, end=None):
if not ((length is None) ^ (end is None)):
raise TypeError('Must provide at least one of `length`, `end`.')
if length is not None:
c1 = Cursor(buff).move(pos)
c2 = c1.clone().advance(length)
return cls(c1, c2)
else:
c1 = Cursor(buff).move(pos)
c2 = c1.clone().move(end)
return cls(c1, c2)
def append(self, text):
self.end_curs.insert(text)
def prepend(self, text):
p = self.start_curs.pos
self.start_curs.insert(text)
self.start_curs.move(p)
def clone(self):
return Span(self.start_curs.clone(),
self.end_curs.clone())
def __getitem__(self, key):
if not isinstance(key, slice):
key = slice(key, key + 1)
start, stop, step = key.indices(len(self.text))
if step != 1:
raise TypeError('step is unsupported')
return Span(self.start_curs.clone().advance(start),
self.start_curs.clone().advance(stop))
class Region(object):
Debug = True
def __init__(self, *spans):
self.spans = tuple(sorted(spans, key=lambda span: span.start_curs.pos))
if self.Debug:
for span1, span2 in zip(self.spans, self.spans[1:]):
if span1.end_curs.pos > span2.start_curs.pos:
logging.error('spans in regions must be exclusive: %r', self)
assert False, 'spans in regions must be exclusive'
def clone(self):
return Region(*(s.clone() for s in self.spans))
def __contains__(self, pos):
return any(pos in x for x in self.spans)
def contains_inclusive(self, pos):
return any(x.contains_inclusive(pos) for x in self.spans)
def __repr__(self):
return 'Region{!r}'.format(self.spans)
@property
def ranges(self):
for span in self.spans:
yield from span.ranges
@property
def region(self):
return self
@property
def buffer(self):
if self.spans:
return self.spans[0].buffer
else:
return None
@property
def text(self):
return ''.join(span.text for span in self.spans)
def subtract_span(self, other_span):
def gen():
for self_span in self.spans:
start_equal = self_span.start_curs.pos == other_span.start_curs.pos
end_equal = self_span.end_curs.pos == other_span.end_curs.pos
start_contained = self_span.start_curs in other_span
end_contained = self_span.end_curs in other_span or end_equal
contained = start_contained and end_contained
if not contained and not (start_equal and end_equal):
intersect_head = other_span.start_curs in self_span
intersect_tail = other_span.end_curs in self_span or end_equal
self_start, self_end = self_span.start_curs, self_span.end_curs
other_start, other_end = other_span.start_curs, other_span.end_curs
if intersect_head and self_start.pos != other_start.pos:
yield Span(self_start, other_start) #self_span.start_curs, other_span.start_curs)
if intersect_tail and other_end.pos != self_end.pos:
yield Span(other_end, self_end) #other_span.end_curs, self_span.end_curs)
if not (intersect_head or intersect_tail):
yield self_span
return Region(*gen())
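    # Worked illustration (hedged): subtracting a span that overlaps the head
    # of an existing span keeps only the tail, e.g. [0,10) - [0,4) -> [4,10);
    # a span strictly inside splits it in two: [0,10) - [3,5) -> [0,3), [5,10).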
def __sub__(self, other):
other = other.region
result = self
while other.spans:
result = result.subtract_span(other.spans[0])
other = Region(*other.spans[1:])
return result
def set_attributes(self, **kw):
for span in self.spans:
span.set_attributes(**kw)
def __eq__(self, other):
return self.spans == other.spans
def remove(self):
for span in self.spans:
span.remove()
| sam-roth/Keypad | keypad/buffers/span.py | Python | gpl-3.0 | 7,061 |
# -*- coding: utf-8 -*-
import factory
from TWLight.resources.factories import PartnerFactory
from TWLight.users.factories import EditorFactory
from TWLight.applications.models import Application
class ApplicationFactory(factory.django.DjangoModelFactory):
class Meta:
model = Application
strategy = factory.CREATE_STRATEGY
editor = factory.SubFactory(EditorFactory)
partner = factory.SubFactory(PartnerFactory)
| WikipediaLibrary/TWLight | TWLight/applications/factories.py | Python | mit | 446 |
class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
if not matrix:
return []
left = 0
up = 0
right =len(matrix[0])
down = len(matrix)
direction = 0
res = []
while True:
            if direction == 0:
                for i in range(left, right):
                    res.append(matrix[up][i])
                up += 1
            if direction == 1:
                for j in range(up, down):
                    res.append(matrix[j][right - 1])
                right -= 1
if direction == 2:
for k in range(right-1, left-1, -1):
res.append(matrix[down - 1][k])
down -= 1
if direction == 3:
for l in range(down -1, up -1, -1):
res.append(matrix[l][left])
left += 1
direction += 1
direction %= 4
if up > down-1 or left > right -1 :
                return res
 | darkpeach/AlgorithmCoding | _054_SpiralMatrix.py | Python | epl-1.0 | 1157 |
from django.conf.urls import patterns, url, include
from .api import *
from rest_framework import routers
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'indicators', IndicatorViewSet, base_name='indicator')
urlpatterns = patterns(
'',
url(r'^$', Base.as_view(), name="indicator-base"),
url(r'', include(router.urls))
)
| mmilaprat/policycompass-services | apps/indicatorservice/urls.py | Python | agpl-3.0 | 357 |
# oppia/preview/views.py
import codecs
import os
import re
from django.conf import settings
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from oppia.models import Course, Activity, Tracker
from oppia.views import check_can_view
from oppia.course_xml_reader import CourseXML
def home_view(request):
course_list = []
# only get courses that are already published for preview
    for course_dir in os.listdir(settings.MEDIA_ROOT + "courses/"):
        try:
            if request.user.is_staff:
                course = Course.objects.get(is_archived=False, shortname=course_dir)
            else:
                course = Course.objects.get(is_draft=False, is_archived=False, shortname=course_dir)
course_list.append(course)
except Course.DoesNotExist:
pass
return render_to_response('oppia/preview/home.html',
{'course_list': course_list},
context_instance=RequestContext(request))
def course_home_view(request, id):
course = check_can_view(request, id)
return render_to_response('oppia/preview/course_home.html',
{'course': course },
context_instance=RequestContext(request))
def course_activity_view(request, course_id, activity_id):
course = check_can_view(request, course_id)
activity = Activity.objects.get(pk=activity_id)
# log the activity in the tracker
tracker = Tracker()
tracker.user = request.user
tracker.course = course
tracker.type = activity.type
tracker.data = ""
tracker.ip = request.META.get('REMOTE_ADDR','0.0.0.0')
tracker.agent = request.META.get('HTTP_USER_AGENT','unknown')
tracker.activity_title = activity.title
tracker.section_title = activity.section.title
tracker.save()
if activity.type == "page":
activity_content_file = activity.get_content()
with codecs.open(settings.MEDIA_ROOT + "courses/" + course.shortname + "/" + activity_content_file, "r", "utf-8") as f:
s = f.read()
template = re.compile('\<body(?P<body>.*?)>(?P<content>.*)\<\/body\>', re.DOTALL)
activity_content = template.search(s).group('content')
activity_content = activity_content.replace("images/",settings.MEDIA_URL + "courses/" + course.shortname + "/images/")
return render_to_response('oppia/preview/course_activity_page.html',
{'course': course, 'activity': activity , 'content' : activity_content },
context_instance=RequestContext(request))
else:
activity_content= None
return render_to_response('oppia/preview/course_activity_not_supported.html',
{'course': course, 'activity': activity , 'content' : activity_content },
                              context_instance=RequestContext(request))
 | DigitalCampus/django-maf-oppia | oppia/preview/views.py | Python | gpl-3.0 | 3111 |
# Copyright 2017 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class RHOCIException(Exception):
"""Base RHOCI Exception.
To use this class, inherit from it and define a 'message' property.
"""
message = "An unknown exception occurred."
def __init__(self, *args, **kwargs):
if len(args) > 0:
self.message = args[0]
super(RHOCIException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
class APIException(RHOCIException):
msg = "Something, somewhere went terribly wrong"
class MissingInputException(RHOCIException):
message = "You forget to specify critical parameter: %(parameter)s"
| bregman-arie/rhoci | rhoci/common/exceptions.py | Python | apache-2.0 | 1,218 |
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
from calibre.ebooks.docx.block_styles import ( # noqa
inherit, simple_color, LINE_STYLES, simple_float, binary_property, read_shd)
# Read from XML {{{
def read_text_border(parent, dest, XPath, get):
border_color = border_style = border_width = padding = inherit
elems = XPath('./w:bdr')(parent)
if elems and elems[0].attrib:
border_color = simple_color('auto')
border_style = 'none'
border_width = 1
for elem in elems:
color = get(elem, 'w:color')
if color is not None:
border_color = simple_color(color)
style = get(elem, 'w:val')
if style is not None:
border_style = LINE_STYLES.get(style, 'solid')
space = get(elem, 'w:space')
if space is not None:
try:
padding = float(space)
except (ValueError, TypeError):
pass
sz = get(elem, 'w:sz')
if sz is not None:
            # we don't care about art borders (they are only used for page borders)
try:
# A border of less than 1pt is not rendered by WebKit
border_width = min(96, max(8, float(sz))) / 8
except (ValueError, TypeError):
pass
setattr(dest, 'border_color', border_color)
setattr(dest, 'border_style', border_style)
setattr(dest, 'border_width', border_width)
setattr(dest, 'padding', padding)
def read_color(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:color[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
ans = simple_color(val)
setattr(dest, 'color', ans)
def convert_highlight_color(val):
return {
'darkBlue': '#000080', 'darkCyan': '#008080', 'darkGray': '#808080',
'darkGreen': '#008000', 'darkMagenta': '#800080', 'darkRed': '#800000', 'darkYellow': '#808000',
'lightGray': '#c0c0c0'}.get(val, val)
def read_highlight(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:highlight[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
if not val or val == 'none':
val = 'transparent'
else:
val = convert_highlight_color(val)
ans = val
setattr(dest, 'highlight', ans)
def read_lang(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:lang[@w:val]')(parent):
val = get(col, 'w:val')
if not val:
continue
try:
code = int(val, 16)
except (ValueError, TypeError):
ans = val
else:
from calibre.ebooks.docx.lcid import lcid
val = lcid.get(code, None)
if val:
ans = val
setattr(dest, 'lang', ans)
def read_letter_spacing(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:spacing[@w:val]')(parent):
val = simple_float(get(col, 'w:val'), 0.05)
if val is not None:
ans = val
setattr(dest, 'letter_spacing', ans)
def read_sz(parent, dest, XPath, get):
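    # w:sz is expressed in half-points, hence the 0.5 factor to get points.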
ans = inherit
for col in XPath('./w:sz[@w:val]')(parent):
val = simple_float(get(col, 'w:val'), 0.5)
if val is not None:
ans = val
setattr(dest, 'font_size', ans)
def read_underline(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:u[@w:val]')(parent):
val = get(col, 'w:val')
if val:
ans = val if val == 'none' else 'underline'
setattr(dest, 'text_decoration', ans)
def read_vert_align(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:vertAlign[@w:val]')(parent):
val = get(col, 'w:val')
if val and val in {'baseline', 'subscript', 'superscript'}:
ans = val
setattr(dest, 'vert_align', ans)
def read_position(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:position[@w:val]')(parent):
val = get(col, 'w:val')
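        # w:position is also given in half-points; halve it to get points.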
try:
ans = float(val)/2.0
except Exception:
pass
setattr(dest, 'position', ans)
def read_font_family(parent, dest, XPath, get):
ans = inherit
for col in XPath('./w:rFonts')(parent):
val = get(col, 'w:asciiTheme')
if val:
val = '|%s|' % val
else:
val = get(col, 'w:ascii')
if val:
ans = val
setattr(dest, 'font_family', ans)
# }}}
class RunStyle(object):
all_properties = {
'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint',
'rtl', 'shadow', 'smallCaps', 'strike', 'vanish', 'webHidden',
'border_color', 'border_style', 'border_width', 'padding', 'color', 'highlight', 'background_color',
'letter_spacing', 'font_size', 'text_decoration', 'vert_align', 'lang', 'font_family', 'position',
}
toggle_properties = {
'b', 'bCs', 'caps', 'emboss', 'i', 'iCs', 'imprint', 'shadow', 'smallCaps', 'strike', 'vanish',
}
def __init__(self, namespace, rPr=None):
self.namespace = namespace
self.linked_style = None
if rPr is None:
for p in self.all_properties:
setattr(self, p, inherit)
else:
for p in (
'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint', 'rtl', 'shadow',
'smallCaps', 'strike', 'vanish', 'webHidden',
):
setattr(self, p, binary_property(rPr, p, namespace.XPath, namespace.get))
for x in ('text_border', 'color', 'highlight', 'shd', 'letter_spacing', 'sz', 'underline', 'vert_align', 'position', 'lang', 'font_family'):
f = globals()['read_%s' % x]
f(rPr, self, namespace.XPath, namespace.get)
for s in namespace.XPath('./w:rStyle[@w:val]')(rPr):
self.linked_style = namespace.get(s, 'w:val')
self._css = None
def update(self, other):
for prop in self.all_properties:
nval = getattr(other, prop)
if nval is not inherit:
setattr(self, prop, nval)
if other.linked_style is not None:
self.linked_style = other.linked_style
def resolve_based_on(self, parent):
for p in self.all_properties:
val = getattr(self, p)
if val is inherit:
setattr(self, p, getattr(parent, p))
def get_border_css(self, ans):
for x in ('color', 'style', 'width'):
val = getattr(self, 'border_'+x)
if x == 'width' and val is not inherit:
val = '%.3gpt' % val
if val is not inherit:
ans['border-%s' % x] = val
def clear_border_css(self):
for x in ('color', 'style', 'width'):
setattr(self, 'border_'+x, inherit)
@property
def css(self):
if self._css is None:
c = self._css = OrderedDict()
td = set()
if self.text_decoration is not inherit:
td.add(self.text_decoration)
if self.strike and self.strike is not inherit:
td.add('line-through')
if self.dstrike and self.dstrike is not inherit:
td.add('line-through')
if td:
c['text-decoration'] = ' '.join(td)
if self.caps is True:
c['text-transform'] = 'uppercase'
if self.i is True:
c['font-style'] = 'italic'
if self.shadow and self.shadow is not inherit:
c['text-shadow'] = '2px 2px'
if self.smallCaps is True:
c['font-variant'] = 'small-caps'
if self.vanish is True or self.webHidden is True:
c['display'] = 'none'
self.get_border_css(c)
if self.padding is not inherit:
c['padding'] = '%.3gpt' % self.padding
for x in ('color', 'background_color'):
val = getattr(self, x)
if val is not inherit:
c[x.replace('_', '-')] = val
for x in ('letter_spacing', 'font_size'):
val = getattr(self, x)
if val is not inherit:
c[x.replace('_', '-')] = '%.3gpt' % val
if self.position is not inherit:
c['vertical-align'] = '%.3gpt' % self.position
if self.highlight is not inherit and self.highlight != 'transparent':
c['background-color'] = self.highlight
if self.b:
c['font-weight'] = 'bold'
if self.font_family is not inherit:
c['font-family'] = self.font_family
return self._css
def same_border(self, other):
return self.get_border_css({}) == other.get_border_css({})
| timpalpant/calibre | src/calibre/ebooks/docx/char_styles.py | Python | gpl-3.0 | 9,136 |
import numpy as np
import scipy.ndimage as nd
class Transformation(object):
"""
Transformation provides facilities to transform a frame of
reference by rotation, translation (shifting) and/or
magnification. If the frame is an image, the input is
    interpolated using a spline function.
Members
-------
params: Dictionary with:
{'rotation': # Radians of rotation about the
# frame center. It has reverse sign
# as the input argument to be compatible
# with IRAF.
'shift':(xshift,yshift) # Shift in pixels
'magnification':(xmag,ymag) # (scalars). Magnification
# about the frame center.
}
    matrix: Rotation matrix. Set when affine_transform is used.
notransform: Boolean flag indicating whether to apply the
transformation function or not.
    xy_coords: A tuple (x_array, y_array) containing the
linear transformation equations. It is used when
               the map_coordinates method is requested.
    affine: Bool. Default is True. Use the affine_transform
function.
map_coords: Bool. Default is False. To use the map_coordinates
function.
    dq_data: Bool. Default is False. To use the 'transform_dq'
function.
    order: Spline interpolator order. If 1 it uses a linear
interpolation method, zero is the 'nearest' method.
Anything greater than 1 will give the specified order
of spline interpolation.
mode: string. Default is 'constant'. Points outside the
boundaries of the input are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap').
cval: scalar. Value used for points outside the boundaries of
the input if ``mode='constant'``. Default is 0.0
Methods
-------
affine_init
        Set the affine_transform function parameters:
matrix and offset.
map_coords_init
Set the linear equations to transform the data
For each (x,y) there is one (x_out,y_out) which
        is a function of rotation, shift and/or magnification.
affine_transform
Front end method to the scipy.ndimage.affine_transform
function.
map_coordinates
        Front end method to the scipy.ndimage.map_coordinates
function.
transform
High level method to drive an already set transformation
function. The default one is 'affine'
set_affine
Set attribute affine to True or False
set_map_coords
Set attribute map_coords to True or False
set_dq_data
Set attribute dq_data to True or False
set_interpolator
Changing the interpolation method to use when
correcting the blocks for rotation, shifting and
magnification.
transform_dq
Transform the data quality plane. Must be handled a little
differently. DQ flags are set bit-wise, such that each pixel is the
sum of any of the following values: 0=good pixel,
1=bad pixel (from bad pixel mask), 2=nonlinear, 4=saturated, etc.
To transform the DQ plane without losing flag information, it is
unpacked into separate masks, each of which is transformed in the same
way as the science data. A pixel is flagged if it had greater than
1% influence from a bad pixel. The transformed masks are then added
back together to generate the transformed DQ plane.
"""
def __init__(self, rotation, shift, magnification, interpolator='affine',
order=1, as_iraf=True):
"""
parameters
----------
        rotation: Rotation in degrees from the x-axis in the
                  counterclockwise direction about the center.
        shift: Tuple (x_shift, y_shift), amount to shift in pixels.
magnification:
Tuple of floats, (x,y) magnification. Amount
to magnify the output frame about the center.
offset: For affine transform, indicates the offset
into the array where the transform is applied.
        interpolator: The interpolator function to use; values are:
               'affine': (Default), uses ndimage.affine_transform,
               which uses a spline interpolator of order (order).
               'map_coords': Uses ndimage.map_coordinates using a
               spline interpolator of order (order).
               'dq_data': Uses the method transform_dq.
        order: The order of the spline interpolator used in
               ndimage.affine_transform and ndimage.map_coordinates.
        as_iraf: (bool). Default: True. If True, set the transformation
               to be compatible with the IRAF/geotran task, where the
               rotation is in the counterclockwise direction.
"""
# Set rotation angle (degrees) to radians
rot_rads = np.radians(rotation)
# Turn input tuples to single values
xshift,yshift = shift
xmag,ymag = magnification
# Convert rotation and magnification values to IRAF/geotran
# types.
if as_iraf:
rot_rads = -rot_rads # The rotation direction in Python
# scipy.ndimage package is clockwise,
# so we change the sign.
# If the rot,shift and magnification have default values
# then set member notransform to True.
self.notransform = False
if (rot_rads==0.) and (shift==(0.,0.)) and (magnification==(1,1)):
self.notransform = True
# For clarity put the pars in a dictionary.
self.params ={'rotation':rot_rads, 'shift':(xshift,yshift),
'magnification':(xmag,ymag)}
if interpolator not in ['affine','map_coords']:
raise ValueError('Bad input parameter "interpolator" value')
# Set default values
self.affine = True
self.map_coords = False
self.dq_data = False
# Reset if parameter does not have the default value.
if interpolator == 'map_coords':
self.set_map_coords()
if order>5:
raise ValueError('Spline order cannot be greater than 5')
self.order = min(5, max(order,1)) # Spline order
# Set default values for these members. The methods affine_transform
# and map_coordinates allow changing.
self.mode = 'constant'
self.cval = 0
def affine_init(self,imagesize):
"""
        Set the affine_transform function parameters:
matrix and offset.
Input
-----
imagesize:
            Tuple with image.shape values, i.e. (npixels_y, npixels_x).
These are used to put the center of rotation to the
center of the frame.
"""
# Set rotation origin as the center of the image
ycen,xcen = np.asarray(imagesize)/2.
xmag,ymag = self.params['magnification']
xshift,yshift = self.params['shift']
rot_rads = self.params['rotation']
cx = xmag*np.cos(rot_rads)
sx = xmag*np.sin(rot_rads)
cy = ymag*np.cos(rot_rads)
sy = ymag*np.sin(rot_rads)
self.matrix = np.array([[cy, sy], [-sx, cx]])
# Add translation to this point
xcen += xshift
ycen += yshift
xoff = (1.-cx)*xcen + sx*ycen
yoff = (1.-cy)*ycen - sy*xcen
# We add back the shift
xoff -= xshift
yoff -= yshift
self.offset = (yoff, xoff)
def affine_transform(self, image, order=None, mode=None, cval=None):
"""
Front end method to the scipy.ndimage.affine_transform
function.
Inputs
------
Image: ndarray with image data.
        order: Spline interpolator order. If 1 it uses a linear
interpolation method, zero is the 'nearest' method.
Anything greater than 1 will give the specified order
of spline interpolation.
mode: string. Default is 'constant'. Points outside the
boundaries of the input are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap').
cval: scalar. Value used for points outside the boundaries of
the input if ``mode='constant'``. Default is 0.0
If order, mode and/or cval are different from None, they
replace the default values.
"""
if order == None:
order = self.order
if mode == None:
mode = self.mode
if cval == None:
cval = self.cval
if self.notransform:
return image
if not hasattr(self,'offset'):
self.affine_init(image.shape)
prefilter = order > 1
matrix = self.matrix
offset = self.offset
image = nd.affine_transform(image, matrix, offset=offset,
prefilter=prefilter,
mode=mode, order=order, cval=cval)
return image
def map_coords_init(self, imagesize):
"""
Set the linear equations to transform the data
For each (x,y) there is one (x_out,y_out) which
        is a function of rotation, shift and/or magnification.
Input
-----
imagesize: The shape of the frame where the (x,y)
coordinates are taken from. It sets
the center of rotation.
"""
# Set rotation origin as the center of the image
ycen, xcen = np.asarray(imagesize)/2.
xsc,ysc = self.params['magnification']
xshift,yshift = self.params['shift']
rot_rads = self.params['rotation']
cx = xsc*np.cos(rot_rads)
sx = xsc*np.sin(rot_rads)
cy = ysc*np.cos(rot_rads)
sy = ysc*np.sin(rot_rads)
# Create am open mesh_grid. Only one-dimension
# of each argument is returned.
Y,X = np.ogrid[:imagesize[0],:imagesize[1]]
# Add translation to this point
xcen += xshift
ycen += yshift
xcc = xcen - X
ycc = ycen - Y
xx = -xcc*cx + ycc*sx + xcen - xshift
yy = -ycc*cy - xcc*sy + ycen - yshift
self.xy_coords = np.array([yy,xx])
def map_coordinates(self, image, order=None, mode=None, cval=None):
"""
        Front end to the scipy.ndimage.map_coordinates function
Input
-----
Image: ndarray with image data.
        order: Spline interpolator order. If 1 it uses a linear
interpolation method, zero is the 'nearest' method.
Anything greater than 1 will give the specified order
of spline interpolation.
mode: string. Default is 'constant'. Points outside the
boundaries of the input are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap').
cval: scalar. Value used for points outside the boundaries of
the input if ``mode='constant'``. Default is 0.0
If order, mode and/or cval are different from None, they
replace the default values.
"""
if order == None:
order = self.order
if mode == None:
mode = self.mode
if cval == None:
cval = self.cval
if self.notransform:
return image
if not hasattr(self,'xy_coords'):
self.map_coords_init(image.shape)
prefilter = order > 1
# The xy_ccords member is set in map_coords_init().
image = nd.map_coordinates(image, self.xy_coords,
prefilter=prefilter,
mode=mode, order=order, cval=cval)
del self.xy_coords
return image
def transform(self,data):
"""
High level method to drive an already set transformation
function. The default one is 'affine'
"""
if self.affine: # Use affine_transform
output = self.affine_transform(data)
elif self.map_coords: # Use map_coordinates
output = self.map_coordinates(data)
elif self.dq_data: # For DQ data use map_coordinates
output = self.transform_dq(data)
else:
raise ValueError("Transform function not defined.")
return output
def set_affine(self):
"""Set attribute affine to True"""
self.affine = True
self.map_coords = False
self.dq_data = False
def set_map_coords(self):
"""Set attribute map_coords to True or False"""
self.map_coords = True
self.affine = False
self.dq_data = False
def set_dq_data(self):
"""Set attribute dq_data to True or False"""
self.dq_data = True
self.affine = False
self.map_coords = False
def set_interpolator(self,tfunction='linear',spline_order=2):
"""
Changing the interpolation method to use when
correcting the blocks for rotation, shifting and
magnification. The methods and order are defined
in the global variable 'trans_functions' at the
bottom of this module.
Parameters
----------
:param tfunction:
Interpolator name. The supported values are:
'linear', 'nearest', 'spline'.
The order for 'nearest' is set to 0. The order
for 'linear' is set to 1. Anything greater
than 1 will give the specified order of spline
interpolation.
:param spline_order: Used when tfunction is 'spline'. The order
of the spline interpolator. (default is 2).
"""
# check for allowed values:
if tfunction not in ('linear', 'nearest', 'spline'):
raise ValueError("Interpolator:'"+tfunction+\
"' is not in ('linear','nearest','spline')")
# Set the order:
if tfunction == 'linear':
order = 1
elif tfunction == 'nearest':
order = 0
else:
order = min(5, max(spline_order,2)) # Spline. No more than 5
self.order=order
def transform_dq(self, data):
"""
Transform the data quality plane. Must be handled a little
differently. DQ flags are set bit-wise, such that each pixel is the
sum of any of the following values: 0=good pixel,
1=bad pixel (from bad pixel mask), 2=nonlinear, 4=saturated, etc.
To transform the DQ plane without losing flag information, it is
unpacked into separate masks, each of which is transformed in the same
way as the science data. A pixel is flagged if it had greater than
1% influence from a bad pixel. The transformed masks are then added
back together to generate the transformed DQ plane.
DQ flags are set bit-wise
bit 1: bad pixel (1)
bit 2: nonlinear (2)
bit 3: saturated (4)
bit 4:
bit 5: nodata (5)
A pixel can be 0 (good, no flags), or the sum of
any of the above flags
(or any others I don't know about)
(Note: This code was taken from resample.py)
Parameters
----------
:param data: Ndarray to transform
:param data,rot,shift, mag, jfactor: See self.affine_tranform help
Output
------
:param outdata: The tranformed input data. Input remains unchanged.
"""
if self.notransform:
return data
# unpack the DQ data into separate masks
# NOTE: this method only works for 8-bit masks!
unp = data.shape + (8,)
unpack_data = np.unpackbits(np.uint8(data)).reshape(unp)
# transform each mask
outdata = np.zeros(data.shape)
do_nans = False
gnan = None
for j in range(0,8):
# skip the transformation if there are no flags set
# (but always do the bad pixel mask because it is
# needed to mask the part of the array that was
# padded out to match the reference image)
if not unpack_data[:,:,j].any() and j!=7:
# first bit is j=7 because unpack is backwards
continue
mask = np.float32(unpack_data[:,:,j])
# if bad pix bit, pad with 1. Otherwise, pad with 0
if j==7:
cval = 1
else:
# Instead of padding with zeros, we mark the
# no-data values with Nans
cval = np.nan
do_nans = True
trans_mask = self.map_coordinates(mask,cval=cval)
del mask; mask = None
# Get the nans indices:
if do_nans:
gnan = np.where(np.isnan(trans_mask))
# We mark only once
do_nans = False
# flag any pixels with >1% influence from bad pixel
trans_mask = np.where(np.abs(trans_mask)>0.01, 2**(7-j),0)
# add the flags into the overall mask
outdata += trans_mask
del trans_mask; trans_mask = None
# QUESTION: Do we need to put outdata[gnan] for each plane != 7?
# put nan's back
        if gnan is not None:
outdata[gnan] = np.nan
return outdata
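# Hedged usage sketch (not part of the original module): rotate a small
# synthetic image by 30 degrees about its center; values are arbitrary.
if __name__ == '__main__':
    image = np.arange(100, dtype=float).reshape(10, 10)
    tform = Transformation(rotation=30.0, shift=(0.0, 0.0),
                           magnification=(1.0, 1.0))
    rotated = tform.transform(image)
    print(rotated.shape)  # still (10, 10); contents are resampled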
| pyrrho314/recipesystem | trunk/gempy/library/transformation.py | Python | mpl-2.0 | 18,587 |
from iddt.dispatcher import Dispatcher
class MyDispatcher(Dispatcher):
def __init__(self):
super(MyDispatcher, self).__init__()
d = MyDispatcher()
d.dispatch({
'target_url': 'http://example.com',
'link_level': 0,
'allowed_domains': [],
})
| thequbit/iddt | iddt/dispatcher_example.py | Python | gpl-3.0 | 267 |
#!/usr/bin/python
"""
Script to check autotest code contributions (patches) for errors.
The workflow is as follows:
* The patch will be applied and any problems will be reported.
* If there are new files created, remember user to add them to VCS.
* If any added file looks like a executable file, remember user to make them
executable.
* If any of the files added or modified introduce trailing whitespace, tabs
or incorrect indentation, report problems.
* If any of the files have problems during pylint validation, report failures.
* If any of the files changed have a unittest suite, run the unittest suite
and report any failures.
Usage: check_patch.py -p [/path/to/patch]
check_patch.py -i [patchwork id]
check_patch.py -g [github pull request id]
check_patch.py --full --yes [check all autotest tree]
@copyright: Red Hat Inc, 2009.
@author: Lucas Meneghel Rodrigues <[email protected]>
"""
import os, stat, logging, sys, optparse, re, urllib, shutil, unittest
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import utils, error, logging_config
from autotest.client.shared import logging_manager
from autotest.utils import run_pylint, reindent
# Hostname of patchwork server to use
PWHOST = "patchwork.virt.bos.redhat.com"
class CheckPatchLoggingConfig(logging_config.LoggingConfig):
def configure_logging(self, results_dir=None, verbose=True):
super(CheckPatchLoggingConfig, self).configure_logging(use_console=True,
verbose=verbose)
p = os.path.join(os.getcwd(), 'check-patch.log')
self.add_file_handler(file_path=p)
class VCS(object):
"""
Abstraction layer to the version control system.
"""
def __init__(self):
"""
Class constructor. Guesses the version control name and instantiates it
as a backend.
"""
backend_name = self.guess_vcs_name()
if backend_name == "SVN":
self.backend = SubVersionBackend()
self.type = "subversion"
elif backend_name == "git":
self.backend = GitBackend()
self.type = "git"
else:
self.backend = None
def guess_vcs_name(self):
if os.path.isdir(".svn"):
return "SVN"
elif os.path.isdir(".git"):
return "git"
else:
logging.error("Could not figure version control system. Are you "
"on a working directory? Aborting.")
sys.exit(1)
def get_unknown_files(self):
"""
Return a list of files unknown to the VCS.
"""
return self.backend.get_unknown_files()
def get_modified_files(self):
"""
Return a list of files that were modified, according to the VCS.
"""
return self.backend.get_modified_files()
def is_file_tracked(self, fl):
"""
Return whether a file is tracked by the VCS.
"""
return self.backend.is_file_tracked(fl)
def add_untracked_file(self, fl):
"""
Add an untracked file to version control.
"""
        return self.backend.add_untracked_file(fl)
def revert_file(self, fl):
"""
Restore file according to the latest state on the reference repo.
"""
        return self.backend.revert_file(fl)
def apply_patch(self, patch):
"""
Applies a patch using the most appropriate method to the particular VCS.
"""
return self.backend.apply_patch(patch)
def update(self):
"""
Updates the tree according to the latest state of the public tree
"""
return self.backend.update()
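# Hedged illustration (not part of the original script) of the VCS facade
# above, run from the top of a checked-out working tree:
#     vcs = VCS()
#     vcs.update()
#     for fl in vcs.get_modified_files():
#         logging.info(fl)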
class SubVersionBackend(object):
"""
Implementation of a subversion backend for use with the VCS abstraction
layer.
"""
def __init__(self):
self.ignored_extension_list = ['.orig', '.bak']
def get_unknown_files(self):
status = utils.system_output("svn status --ignore-externals")
unknown_files = []
        for line in status.split("\n"):
            if not line:
                continue
            status_flag = line[0]
            if status_flag == "?":
                # Append once, and only if the file is not an ignored backup.
                if not any(line.endswith(extension)
                           for extension in self.ignored_extension_list):
                    unknown_files.append(line[1:].strip())
return unknown_files
def get_modified_files(self):
status = utils.system_output("svn status --ignore-externals")
modified_files = []
        for line in status.split("\n"):
            if not line:
                continue
            status_flag = line[0]
            if status_flag in ("M", "A"):
                modified_files.append(line[1:].strip())
return modified_files
def is_file_tracked(self, fl):
stdout = None
try:
cmdresult = utils.run("svn status --ignore-externals %s" % fl,
verbose=False)
stdout = cmdresult.stdout
except error.CmdError:
return False
if stdout is not None:
if stdout:
if stdout.startswith("?"):
return False
else:
return True
else:
return True
else:
return False
def add_untracked_file(self, fl):
"""
Add an untracked file under revision control.
@param file: Path to untracked file.
"""
try:
utils.run('svn add %s' % fl)
except error.CmdError, e:
logging.error("Problem adding file %s to svn: %s", fl, e)
sys.exit(1)
def revert_file(self, fl):
"""
Revert file against last revision.
@param file: Path to file to be reverted.
"""
try:
utils.run('svn revert %s' % fl)
except error.CmdError, e:
logging.error("Problem reverting file %s: %s", fl, e)
sys.exit(1)
def apply_patch(self, patch):
"""
Apply a patch to the code base. Patches are expected to be made using
level -p1, and taken according to the code base top level.
@param patch: Path to the patch file.
"""
try:
utils.system_output("patch -p1 < %s" % patch)
except:
logging.error("Patch applied incorrectly. Possible causes: ")
logging.error("1 - Patch might not be -p1")
logging.error("2 - You are not at the top of the autotest tree")
logging.error("3 - Patch was made using an older tree")
logging.error("4 - Mailer might have messed the patch")
sys.exit(1)
def update(self):
try:
utils.system("svn update")
except error.CmdError, e:
logging.error("SVN tree update failed: %s" % e)
class GitBackend(object):
"""
Implementation of a git backend for use with the VCS abstraction layer.
"""
def __init__(self):
self.ignored_extension_list = ['.orig', '.bak']
def get_unknown_files(self):
status = utils.system_output("git status --porcelain")
unknown_files = []
        for line in status.split("\n"):
            if not line:
                continue
            # Untracked entries in porcelain output are prefixed with "??".
            status_flag = line[:2]
            if status_flag == "??":
                if not any(line.endswith(extension)
                           for extension in self.ignored_extension_list):
                    unknown_files.append(line[2:].strip())
return unknown_files
def get_modified_files(self):
status = utils.system_output("git status --porcelain")
modified_files = []
        for line in status.split("\n"):
            if not line:
                continue
            status_flag = line[0]
            if status_flag in ("M", "A"):
                modified_files.append(line[1:].strip())
return modified_files
def is_file_tracked(self, fl):
stdout = None
try:
cmdresult = utils.run("git status --porcelain %s" % fl,
verbose=False)
stdout = cmdresult.stdout
except error.CmdError:
return False
if stdout is not None:
if stdout:
if stdout.startswith("??"):
return False
else:
return True
else:
return True
else:
return False
def add_untracked_file(self, fl):
"""
Add an untracked file under revision control.
@param file: Path to untracked file.
"""
try:
utils.run('git add %s' % fl)
except error.CmdError, e:
logging.error("Problem adding file %s to git: %s", fl, e)
sys.exit(1)
def revert_file(self, fl):
"""
Revert file against last revision.
@param file: Path to file to be reverted.
"""
try:
utils.run('git checkout %s' % fl)
except error.CmdError, e:
logging.error("Problem reverting file %s: %s", fl, e)
sys.exit(1)
def apply_patch(self, patch):
"""
Apply a patch to the code base using git am.
A new branch will be created with the patch name.
@param patch: Path to the patch file.
"""
utils.run("git checkout next")
utils.run("git checkout -b %s" %
os.path.basename(patch).rstrip(".patch"))
try:
utils.run("git am -3 %s" % patch, verbose=False)
except error.CmdError, e:
logging.error("Failed to apply patch to the git repo: %s" % e)
sys.exit(1)
def update(self):
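        # Updating is intentionally a no-op for git at the moment; the early
        # return below skips the (unreachable) pull logic kept underneath.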
return
try:
utils.system("git pull")
except error.CmdError, e:
logging.error("git tree update failed: %s" % e)
class FileChecker(object):
"""
Picks up a given file and performs various checks, looking after problems
and eventually suggesting solutions.
"""
def __init__(self, path, vcs=None, confirm=False):
"""
Class constructor, sets the path attribute.
@param path: Path to the file that will be checked.
@param vcs: Version control system being used.
@param confirm: Whether to answer yes to all questions asked without
prompting the user.
"""
self.path = path
self.vcs = vcs
self.confirm = confirm
self.basename = os.path.basename(self.path)
if self.basename.endswith('.py'):
self.is_python = True
else:
self.is_python = False
mode = os.stat(self.path)[stat.ST_MODE]
self.is_executable = mode & stat.S_IXUSR
checked_file = open(self.path, "r")
self.first_line = checked_file.readline()
checked_file.close()
if "python" in self.first_line:
self.is_python = True
self.corrective_actions = []
self.indent_exception = 'cli/job_unittest.py'
if self.is_python:
logging.debug("Checking file %s",
self.path.replace("./", "autotest/"))
if self.is_python and not self.path.endswith(".py"):
self.bkp_path = "%s-cp.py" % self.path
shutil.copyfile(self.path, self.bkp_path)
else:
self.bkp_path = None
def _get_checked_filename(self):
if self.bkp_path is not None:
return self.bkp_path
else:
return self.path
def _check_indent(self):
"""
Verifies the file with the reindent module
This tool performs the following checks on python files:
* Trailing whitespaces
* Tabs
* End of line
* Incorrect indentation
And will automatically correct problems.
"""
success = True
        if re.search(self.indent_exception, self.path):
return success
path = self._get_checked_filename()
try:
f = open(path)
except IOError, msg:
logging.error("Error opening %s: %s", path, str(msg))
return False
r = reindent.Reindenter(f)
f.close()
try:
if r.run():
success = False
logging.info("Reindenting %s", path)
f = open(path, "w")
r.write(f)
f.close()
except:
pass
if not success and path != self.path:
shutil.copyfile(path, self.path)
return success
def _check_code(self):
"""
Verifies the file with run_pylint.
This tool will call the static code checker pylint using the special
autotest conventions and warn about problems. Some of the problems
        reported might be false positives, but it's always good to look at them.
"""
success = True
path = self._get_checked_filename()
if run_pylint.check_file(path):
success = False
return success
def _check_unittest(self):
"""
Verifies if the file in question has a unittest suite, if so, run the
unittest and report on any failures. This is important to keep our
unit tests up to date.
"""
success = True
if "unittest" not in self.basename:
            stripped_name = re.sub(r"\.py$", "", self.basename)
unittest_name = stripped_name + "_unittest.py"
unittest_path = self.path.replace(self.basename, unittest_name)
if os.path.isfile(unittest_path):
                module = re.sub(r"\.py$", "", unittest_path)
module = module.split("/")
module[0] = "autotest"
try:
mod = common.setup_modules.import_module(module[-1],
".".join(module[:-1]))
test = unittest.defaultTestLoader.loadTestsFromModule(mod)
suite = unittest.TestSuite(test)
runner = unittest.TextTestRunner()
result = runner.run(suite)
if result.errors or result.failures:
success = False
msg = '%s had %d failures and %d errors.'
                        msg = msg % ('.'.join(module), len(result.failures),
                                     len(result.errors))
logging.error(msg)
except ImportError:
logging.error("Unable to run unittest %s" %
".".join(module))
return success
def _check_permissions(self):
"""
Verifies the execution permissions.
* Files with no shebang and execution permissions are reported.
* Files with shebang and no execution permissions are reported.
"""
if self.first_line.startswith("#!"):
if not self.is_executable:
if self.vcs.type == "subversion":
utils.run("svn propset svn:executable ON %s" % self.path,
ignore_status=True)
elif self.vcs.type == "git":
utils.run("chmod +x %s" % self.path,
ignore_status=True)
else:
if self.is_executable:
if self.vcs.type == "subversion":
utils.run("svn propdel svn:executable %s" % self.path,
ignore_status=True)
elif self.vcs.type == "git":
utils.run("chmod -x %s" % self.path,
ignore_status=True)
def report(self, skip_unittest=False):
"""
Executes all required checks, if problems are found, the possible
corrective actions are listed.
"""
success = True
self._check_permissions()
if self.is_python:
if not self._check_indent():
success = False
if not self._check_code():
success = False
if not skip_unittest:
if not self._check_unittest():
success = False
if self.bkp_path is not None and os.path.isfile(self.bkp_path):
os.unlink(self.bkp_path)
return success
class PatchChecker(object):
def __init__(self, patch=None, patchwork_id=None, github_id=None,
pwhost=None, vcs=None,
confirm=False):
self.confirm = confirm
self.base_dir = os.getcwd()
if pwhost is None:
self.pwhost = PWHOST
else:
self.pwhost = pwhost
if patch:
self.patch = os.path.abspath(patch)
if patchwork_id:
self.patch = self._fetch_from_patchwork(patchwork_id)
if github_id:
self.patch = self._fetch_from_github(github_id)
if not os.path.isfile(self.patch):
logging.error("Invalid patch file %s provided. Aborting.",
self.patch)
sys.exit(1)
self.vcs = vcs
changed_files_before = self.vcs.get_modified_files()
if changed_files_before:
logging.error("Repository has changed files prior to patch "
"application. ")
answer = utils.ask("Would you like to revert them?", auto=self.confirm)
if answer == "n":
logging.error("Not safe to proceed without reverting files.")
sys.exit(1)
else:
for changed_file in changed_files_before:
self.vcs.revert_file(changed_file)
self.untracked_files_before = self.vcs.get_unknown_files()
self.vcs.update()
def _fetch_from_patchwork(self, pw_id):
"""
Gets a patch file from patchwork and puts it under the cwd so it can
be applied.
        @param pw_id: Patchwork patch id.
"""
patch_url = "http://%s/patch/%s/mbox/" % (self.pwhost, pw_id)
patch_dest = os.path.join(self.base_dir, 'patchwork-%s.patch' % pw_id)
patch = utils.get_file(patch_url, patch_dest)
# Patchwork sometimes puts garbage on the path, such as long
# sequences of underscores (_______). Get rid of those.
patch_ro = open(patch, 'r')
patch_contents = patch_ro.readlines()
patch_ro.close()
patch_rw = open(patch, 'w')
for line in patch_contents:
if not line.startswith("___"):
patch_rw.write(line)
patch_rw.close()
return patch
def _fetch_from_github(self, gh_id):
"""
        Gets a patch file from github and puts it under the cwd so it can
        be applied.
        @param gh_id: Github pull request id.
"""
patch_url = "https://github.com/autotest/autotest/pull/%s.patch" % gh_id
patch_dest = os.path.join(self.base_dir, 'github-%s.patch' % gh_id)
urllib.urlretrieve(patch_url, patch_dest)
return patch_dest
def _check_files_modified_patch(self):
modified_files_after = []
files_failed_check = []
if self.vcs.type == "subversion":
untracked_files_after = self.vcs.get_unknown_files()
modified_files_after = self.vcs.get_modified_files()
add_to_vcs = []
for untracked_file in untracked_files_after:
if untracked_file not in self.untracked_files_before:
add_to_vcs.append(untracked_file)
if add_to_vcs:
logging.info("The files: ")
for untracked_file in add_to_vcs:
logging.info(untracked_file)
logging.info("Might need to be added to VCS")
answer = utils.ask("Would you like to add them to VCS ?")
if answer == "y":
for untracked_file in add_to_vcs:
self.vcs.add_untracked_file(untracked_file)
modified_files_after.append(untracked_file)
elif answer == "n":
pass
elif self.vcs.type == "git":
patch = open(self.patch)
for line in patch.readlines():
if line.startswith("diff --git"):
m_file = line.split()[-1][2:]
if m_file not in modified_files_after:
modified_files_after.append(m_file)
patch.close()
for modified_file in modified_files_after:
# Additional safety check, new commits might introduce
# new directories
if os.path.isfile(modified_file):
file_checker = FileChecker(path=modified_file, vcs=self.vcs,
confirm=self.confirm)
if not file_checker.report():
files_failed_check.append(modified_file)
if files_failed_check:
return (False, files_failed_check)
else:
return (True, [])
def check(self):
self.vcs.apply_patch(self.patch)
return self._check_files_modified_patch()
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('-l', '--patch', dest="local_patch", action='store',
help='path to a patch file that will be checked')
parser.add_option('-p', '--patchwork-id', dest="pw_id", action='store',
help='id of a given patchwork patch')
parser.add_option('-g', '--github-id', dest="gh_id", action='store',
help='id of a given github patch')
parser.add_option('--verbose', dest="debug", action='store_true',
help='include debug messages in console output')
parser.add_option('-f', '--full-check', dest="full_check",
action='store_true',
help='check the full tree for corrective actions')
parser.add_option('--patchwork-host', dest="patchwork_host",
help='patchwork custom server url')
parser.add_option('-y', '--yes', dest="confirm",
action='store_true',
help='Answer yes to all questions')
options, args = parser.parse_args()
local_patch = options.local_patch
pw_id = options.pw_id
gh_id = options.gh_id
debug = options.debug
run_pylint.set_verbosity(debug)
full_check = options.full_check
confirm = options.confirm
pwhost = options.patchwork_host
vcs = VCS()
if vcs.backend is None:
vcs = None
logging_manager.configure_logging(CheckPatchLoggingConfig(), verbose=debug)
ignore_list = ['common.py', ".svn", ".git", '.pyc', ".orig", ".rej", ".bak"]
if full_check:
run_pylint.set_verbosity(False)
logging.info("Autotest full tree check")
logging.info("")
for root, dirs, files in os.walk("."):
for fl in files:
check = True
path = os.path.join(root, fl)
for pattern in ignore_list:
if re.search(pattern, path):
check = False
if check:
if vcs is not None:
if not vcs.is_file_tracked(fl=path):
check = False
if check:
file_checker = FileChecker(path=path, vcs=vcs,
confirm=confirm)
file_checker.report(skip_unittest=True)
else:
if local_patch:
logging.info("Checking local patch %s", local_patch)
logging.info("")
patch_checker = PatchChecker(patch=local_patch, vcs=vcs,
confirm=confirm)
elif pw_id:
logging.info("Checking patchwork patch #%s", pw_id)
logging.info("")
patch_checker = PatchChecker(patchwork_id=pw_id, pwhost=pwhost,
vcs=vcs,
confirm=confirm)
elif gh_id:
logging.info("Checking github pull request #%s", gh_id)
logging.info("")
patch_checker = PatchChecker(github_id=gh_id, vcs=vcs,
confirm=confirm)
else:
logging.error('No patch or patchwork id specified. Aborting.')
sys.exit(1)
(success, files_failed_check) = patch_checker.check()
| ColinIanKing/autotest | utils/check_patch.py | Python | gpl-2.0 | 24,686 |
"""
Standard interface for connecting client protocols to the operation extensions.
"""
from cStringIO import StringIO
import lxml.etree, lxml.builder
from zope import interface
from delta import DeltaObserverPool as dop
import opdev, delta
from pyofwave.utils.command import CommandInterface
from .action import ACTION_REGISTRY
class OperationBase(object):
"""
An operation is a transformation that alters an entity (blip,
document, ...) and is made of one or more Actions.
"""
interface.implements(CommandInterface)
def do(self, aDocument):
"""
Perform the operation using the scenario
"""
cursor_position = 0 # Cursor index in the document
# Apply transformations using actions
for action in self.scenario():
cursor_position = action.do(aDocument=aDocument,
cursor_position=cursor_position)
assert(aDocument.length == cursor_position)
def scenario(self):
"""
Yield here the flow of Actions required to perform this
operation.
"""
raise NotImplementedError
def to_xml_etree(self):
"""
Turns an operation to an XML etree
"""
# XXX This namespace shouldn't be hardcoded
E = lxml.builder.ElementMaker(namespace="pyofwave.info/2012/dtd/document.dtd")
xml_etree = E.op()
for action in self.scenario():
xml_etree.append(action.to_xml_etree())
return xml_etree
def to_xml(self):
"""
Turns an operation to an XML stream
"""
return lxml.etree.tostring(self.to_xml_etree())
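# Illustrative sketch (not part of the original module): a concrete operation
# subclasses OperationBase and yields Action instances from scenario(). The
# "insert" tag below is hypothetical -- real tags are whatever keys exist in
# ACTION_REGISTRY -- but the structure is what do() and to_xml() expect.
class ExampleInsertOperation(OperationBase):
    def __init__(self, text):
        self.text = text
    def scenario(self):
        # Actions are looked up by their namespaced XML tag, exactly as
        # XMLOperation.scenario() does when parsing a stream.
        yield ACTION_REGISTRY["{pyofwave.info/2012/dtd/document.dtd}insert"](
            text=self.text)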
class XMLOperation(OperationBase):
"""
An Operation that reads its Actions from a XML stream.
"""
def __init__(self, xml):
self.xml = xml
def scenario(self):
root = lxml.etree.fromstring(self.xml)
# XXX This namespace shouldn't be hardcoded
operation_tag = root.find(".//{pyofwave.info/2012/dtd/document.dtd}op")
for element in operation_tag.iterchildren():
# Lookup the action name (using: "{NS}NAME") from the
# action registry and yield the corresponding Action(s)
if element.tag in ACTION_REGISTRY:
kwargs = dict(element.items())
try:
yield ACTION_REGISTRY[element.tag](**kwargs)
except Exception, e:
raise OperationError("Wrong usage of '%s': %s" % (element.tag, e))
else:
raise OperationError("Unknown Action: %s" % element.tag)
# Perform operation
def _getChildren(tag):
rep = [tag.text, ]
for child in tag:
rep.append(child)
rep.append(child.tail)
return rep
def performOperation(event, operation):
"""
Execute an operation.
"""
rep = opdev._receive[operation.tag](event, *_getChildren(operation), **operation.attrib)
EventRegisty.notify(operation)
return rep
# Events
def get(obj, prop, default=None):
    # A mutable default argument ({}) would be shared between every call, so
    # use None as a sentinel and create a fresh dict per call instead.
    if not obj.get(prop):
        obj[prop] = {} if default is None else default
    return obj[prop]
_handlers = {}
class EventRegisty(object):
"""
Keeps track of all the events a user registers to.
"""
def __init__(self, user, callback):
self.user = user
self._callback = callback
def _handlers(self, url, operation):
# XXX : Why is it a list that is associated to an operation ?
# XXX : Is it possible to assign several callback to an operation ?
return get(get(_handlers, url), operation, [])
def register(self, url, operation):
# XXX: All registered operations will have the save callback
self._handlers(url, operation).append(self._callback)
def unregister(self, url, operation="*"):
url_handlers = get(_handlers, url)
if operation == "*":
for operation in url_handlers.keys():
operation_callback = self._handlers(url, operation)
if self._callback in operation_callback:
operation_callback.remove(self._callback)
else:
self._handlers(url, operation).remove(self._callback)
@staticmethod
def notify(operation, src=None):
        if src is None:
src = operation.get("href", operation.get("src", ""))
for handler in _handlers.get(src, {}).get(operation.tag, []):
            ## FIXME: apply_async fails because handler does not seem to be picklable
            #dop.apply_async(handler, [operation])
            # XXX: IMHO, a synchronous call is enough for the moment
handler(operation)
@delta.alphaDeltaObservable.addObserver
def applyDelta(doc, delta):
    """Calculate and send events."""
## -- Exceptions -- #
class OperationError(Exception):
pass
| pyofwave/PyOfWave | pyofwave_server/pyofwave/core/operation.py | Python | mpl-2.0 | 4,886 |
# for constants which need to be accessed by various parts of Sentinel
# skip proposals on superblock creation if the SB isn't within the fudge window
SUPERBLOCK_FUDGE_WINDOW = 60 * 60 * 2
| dashpay/sentinel | lib/constants.py | Python | mit | 190 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "blog.tuticfruti.com",
"name": "tuticfruti_blog"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| tuticfruti/tuticfruti_blog | tuticfruti_blog/contrib/sites/migrations/0002_set_site_domain_and_name.py | Python | bsd-3-clause | 955 |
from django import forms
from django.forms.models import modelformset_factory
#https://docs.djangoproject.com/en/dev/topics/forms/modelforms/
from datetime import date
#from olwidget.forms import MapModelForm
from whyqd.wiqi.models import Wiqi, Text #, Image #, Book#, Geomap
class WiqiStackRevertForm(forms.Form):
comment = forms.CharField(max_length=500, required=False, label="Reason for reversion")
class WiqiStackRangeForm(forms.Form):
range_from = forms.DateField(label="from", required=False)
range_to = forms.DateField(label="to", required=False, initial=date.today())
def __init__(self, *args, **kwargs):
super(WiqiStackRangeForm, self).__init__(*args, **kwargs)
self.fields['range_from'].widget.attrs['class'] = 'datepicker'
self.fields['range_to'].widget.attrs['class'] = 'datepicker'
class WiqiForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(WiqiForm, self).__init__(*args, **kwargs)
self.fields['is_live_from'].widget.attrs['class'] = 'datepicker'
self.fields['is_live_to'].widget.attrs['class'] = 'datepicker'
class Meta:
model = Wiqi
fields = ('is_live_from', 'is_live_to', )
class WiqiPriceForm(forms.ModelForm):
class Meta:
model = Wiqi
fields = ('price', 'read_if',)
class TextForm(forms.ModelForm):
class Meta:
model = Text
fields = ('title', 'content', 'custom_div')
widgets = {
'content': forms.Textarea(attrs={'cols': 80, 'rows': 20}),
}
#class ImageForm(forms.ModelForm):
# class Meta:
# model = Image
# fields = ('image', )
#
# class BookFullForm(forms.ModelForm):
# class Meta:
# model = Book
# fields = ('title', 'authors', 'authorsort', 'pitch', 'cover_image', 'summary', 'language', 'series', 'series_index', 'ISBN', )
WIQI_FORM_TYPE_DICT = {
'text': TextForm,
#'image': ImageForm,
#'book': BookForm,
#'geomap': GeomapForm,
}
'''
class GeomapForm(MapModelForm):
class Meta:
model = Geomap
fields = ('name', 'map_shape', 'comment', )
# Dynamic Form Generation - http://jacobian.org/writing/dynamic-form-generation/
# Django passing object ID in hiddeninput by populating - http://stackoverflow.com/q/16668463
# https://docs.djangoproject.com/en/dev/topics/forms/formsets/
# Multi-Object-Edit With Django FormSets - http://christiankaula.com/multi-object-edit-django-formsets.html
''' | turukawa/whyqd | whyqd/wiqi/forms.py | Python | agpl-3.0 | 2,599 |
"""Scraper for Fifth Circuit of Appeals
CourtID: ca5
Court Short Name: ca5
Reviewer: mlr
History:
- 2014-07-19: Created by Andrei Chelaru
- 2014-11-08: Updated for new site by mlr.
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://www.ca5.uscourts.gov/rss.aspx?Feed=OralArgRecs'
def _get_download_urls(self):
path = "//item/link"
return [e.tail for e in self.html.xpath(path)]
def _get_case_names(self):
path = "//item/description/text()[2]"
return [s for s in self.html.xpath(path)]
def _get_case_dates(self):
path = "//item/description/text()[3]"
return [datetime.strptime(date_string[9:], '%m/%d/%Y').date() for
date_string in self.html.xpath(path)]
def _get_docket_numbers(self):
path = "//item/description/text()[1]"
return [s for s in self.html.xpath(path)]
| Andr3iC/juriscraper | oral_args/united_states/federal_appellate/ca5.py | Python | bsd-2-clause | 1,102 |
# coding: utf8
from __future__ import unicode_literals
from ..char_classes import LIST_ELLIPSES, LIST_ICONS
from ..char_classes import QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER
_quotes = QUOTES.replace("'", '')
_infixes = (LIST_ELLIPSES + LIST_ICONS +
[r'(?<=[{}])\.(?=[{}])'.format(ALPHA_LOWER, ALPHA_UPPER),
r'(?<=[{a}])[,!?](?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}])([{q}\)\]\(\[])(?=[\{a}])'.format(a=ALPHA, q=_quotes),
r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
r'(?<=[0-9])-(?=[0-9])'])
TOKENIZER_INFIXES = _infixes
| aikramer2/spaCy | spacy/lang/de/punctuation.py | Python | mit | 701 |
#!/usr/bin/env python
# coding=utf-8
import pprint
import csv
import click
import requests
import datetime as datetime
from datetime import date
from xml.etree import ElementTree as ET
import os
# from random import sample
import random
import json
# import logging
import subprocess
import glob
import time
@click.command()
@click.option('--days', default=10, type=int)
@click.option('--span', default=5, type=int)
# @click.option('--duration', default=3, type=int)
# @click.option('--days', default=1, type=int)
def ctripmultiplus(days, span):
start_days = days
for i in range(span):
subprocess.call(['python', 'ctripplus.py', '--days', str(start_days + i*10)])
for i in range(3):
print('sleeping..')
time.sleep(1)
# newest = max(glob.iglob('output_Search_item_hr_*.csv'), key=os.path.getctime)
# subprocess.call(['python', 'sendmail.py', '--filename', 'output_hotel_ref_*.csv', '--title', 'Ctrip_hotel_ref'])
if __name__ == '__main__':
ctripmultiplus() | Fatman13/gta_swarm | ctripmultiplus.py | Python | mit | 1,017 |
"""Tests for the teams module."""
import functools
import json
from auvsi_suas.models import AerialPosition
from auvsi_suas.models import GpsPosition
from auvsi_suas.models import MissionClockEvent
from auvsi_suas.models import TakeoffOrLandingEvent
from auvsi_suas.models import UasTelemetry
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
login_url = reverse('auvsi_suas:login')
teams_url = reverse('auvsi_suas:teams')
teams_id_url = functools.partial(reverse, 'auvsi_suas:teams_id')
class TestTeamsViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(teams_url)
self.assertEqual(403, response.status_code)
class TestTeamsView(TestCase):
"""Tests the teams view."""
def setUp(self):
self.superuser = User.objects.create_superuser(
'superuser', '[email protected]', 'superpass')
self.superuser.save()
# Login
response = self.client.post(login_url, {
'username': 'superuser',
'password': 'superpass'
})
self.assertEqual(200, response.status_code)
def create_data(self):
"""Create a basic sample dataset."""
self.user1 = User.objects.create_user('user1', '[email protected]',
'testpass')
self.user1.save()
self.user2 = User.objects.create_user('user2', '[email protected]',
'testpass')
self.user2.save()
# user1 is on mission
event = MissionClockEvent(user=self.user1,
team_on_clock=True,
team_on_timeout=False)
event.save()
# user1 is flying
event = TakeoffOrLandingEvent(user=self.user1, uas_in_air=True)
event.save()
# user2 has landed
event = TakeoffOrLandingEvent(user=self.user2, uas_in_air=True)
event.save()
event = TakeoffOrLandingEvent(user=self.user2, uas_in_air=False)
event.save()
# user2 is active
self.timestamp = timezone.now()
gps = GpsPosition(latitude=38.6462, longitude=-76.2452)
gps.save()
pos = AerialPosition(gps_position=gps, altitude_msl=0)
pos.save()
telem = UasTelemetry(user=self.user2, uas_position=pos, uas_heading=90)
telem.save()
telem.timestamp = self.timestamp
telem.save()
def test_normal_user(self):
"""Normal users not allowed access."""
user = User.objects.create_user('testuser', '[email protected]',
'testpass')
user.save()
# Login
response = self.client.post(login_url, {
'username': 'testuser',
'password': 'testpass'
})
self.assertEqual(200, response.status_code)
response = self.client.get(teams_url)
self.assertEqual(403, response.status_code)
def test_no_users(self):
"""No users results in empty list, no superusers."""
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
self.assertEqual([], json.loads(response.content))
def test_post(self):
"""POST not allowed"""
response = self.client.post(teams_url)
self.assertEqual(405, response.status_code)
def test_correct_json(self):
"""Response JSON is properly formatted."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(2, len(data))
for user in data:
self.assertIn('id', user)
self.assertIn('name', user)
self.assertIn('on_clock', user)
self.assertIn('on_timeout', user)
self.assertIn('in_air', user)
self.assertIn('active', user)
def test_users_correct(self):
"""User names and status correct."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
names = [d['name'] for d in data]
self.assertIn('user1', names)
self.assertIn('user2', names)
user1 = data[names.index('user1')]
self.assertEqual(True, user1['on_clock'])
self.assertEqual(False, user1['on_timeout'])
self.assertEqual(True, user1['in_air'])
self.assertEqual(False, user1['active'])
user2 = data[names.index('user2')]
self.assertEqual(False, user2['on_clock'])
self.assertEqual(False, user2['on_timeout'])
self.assertEqual(False, user2['in_air'])
self.assertEqual(True, user2['active'])
class TestTeamsIdViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(teams_id_url(args=[1]))
self.assertEqual(403, response.status_code)
class TestTeamsIdView(TestCase):
"""Tests the teams-by-id view."""
def setUp(self):
self.user1 = User.objects.create_user('user1', '[email protected]',
'testpass')
self.user1.save()
self.superuser = User.objects.create_superuser(
'superuser', '[email protected]', 'superpass')
self.superuser.save()
# Login
response = self.client.post(login_url, {
'username': 'superuser',
'password': 'superpass'
})
self.assertEqual(200, response.status_code)
def test_bad_id(self):
"""Invalid user id rejected"""
response = self.client.get(teams_id_url(args=[999]))
        # assertGreaterEqual(a, b) checks a >= b, so the status code goes
        # first to assert an error (>= 400) response.
        self.assertGreaterEqual(response.status_code, 400)
def test_correct_user(self):
"""User requested is correct"""
response = self.client.get(teams_id_url(args=[self.user1.pk]))
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual('user1', data['name'])
self.assertEqual(self.user1.pk, data['id'])
self.assertEqual(False, data['on_clock'])
self.assertEqual(False, data['on_timeout'])
self.assertEqual(False, data['in_air'])
self.assertEqual(False, data['active'])
def test_bad_json(self):
"""Invalid json rejected"""
response = self.client.put(
teams_id_url(args=[self.user1.pk]),
'Hi there!')
        self.assertGreaterEqual(response.status_code, 400)
def test_invalid_in_air(self):
"""invalid in_air rejected"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'in_air': 'Hi!',
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
        self.assertGreaterEqual(response.status_code, 400)
def test_invalid_clock(self):
"""invalid on_clock and on_timeout rejected"""
invalid_values = [('Hi', False), (False, 'Hi'), (True, True), ]
for on_clock, on_timeout in invalid_values:
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'on_clock': on_clock,
'on_timeout': on_timeout,
})
response = self.client.put(
teams_id_url(args=[self.user1.pk]),
data)
            self.assertGreaterEqual(response.status_code, 400)
def test_no_extra_events(self):
"""No new TakeoffOrLandingEvents created if status doesn't change"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'on_clock': False,
'on_timeout': False,
'in_air': False,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(0, TakeoffOrLandingEvent.objects.count())
self.assertEqual(0, MissionClockEvent.objects.count())
def test_update_in_air(self):
"""In-air can be updated"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'in_air': True,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(True, data['in_air'])
# Event created
event = TakeoffOrLandingEvent.objects.get()
self.assertEqual(self.user1, event.user)
self.assertEqual(True, event.uas_in_air)
def test_update_on_clock(self):
"""on_clock can be updated"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'on_clock': True,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(True, data['on_clock'])
self.assertEqual(False, data['on_timeout'])
# Event created
event = MissionClockEvent.objects.get()
self.assertEqual(self.user1, event.user)
self.assertEqual(True, event.team_on_clock)
def test_update_on_timeout(self):
"""on_timeout can be updated"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': False,
'on_timeout': True,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(False, data['on_clock'])
self.assertEqual(True, data['on_timeout'])
# Event created
event = MissionClockEvent.objects.get()
self.assertEqual(self.user1, event.user)
self.assertEqual(True, event.team_on_timeout)
def test_name_ignored(self):
"""name field ignored"""
expected = self.user1.username
data = json.dumps({
'name': 'Hello World!',
'id': self.user1.pk,
'active': False,
'in_air': False,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(expected, data['name'])
def test_id_ignored(self):
"""id field ignored"""
expected = self.user1.pk
data = json.dumps({
'name': self.user1.username,
'id': 999,
'active': False,
'in_air': False,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(expected, data['id'])
def test_active_ignored(self):
"""active field ignored"""
data = json.dumps({
'name': self.user1.username,
'id': self.user1.pk,
'active': True,
'in_air': False,
})
response = self.client.put(teams_id_url(args=[self.user1.pk]), data)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(False, data['active'])
| justineaster/interop | server/auvsi_suas/views/teams_test.py | Python | apache-2.0 | 11,961 |
"""Singleton class decorator"""
def singleton(cls):
"""
Singleton class decorator
>>> from chula.singleton import singleton
>>>
>>> @singleton
... class MyClass(object):
... pass
...
>>> a = MyClass()
>>> b = MyClass()
>>> c = MyClass()
>>>
>>> a is b
True
>>> a is c
True
"""
instances = {}
def get_instance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
| jmcfarlane/chula | chula/singleton.py | Python | gpl-2.0 | 547 |
# -*- coding: utf-8 -*-
"""This module defines functions generally useful in scikit-ci."""
import os
from .constants import SERVICES, SERVICES_ENV_VAR
def current_service():
for service, env_var in SERVICES_ENV_VAR.items():
if os.environ.get(env_var, 'false').lower() == 'true':
return service
raise LookupError(
"unknown service: None of the environment variables {} are set "
"to 'true' or 'True'".format(", ".join(SERVICES_ENV_VAR.values()))
)
def current_operating_system(service):
return os.environ[SERVICES[service]] if SERVICES[service] else None
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
Copied from textwrap.py available in python 3 (cpython/cpython@a2d2bef)
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
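# Example: indent("a\n\nb\n", "> ") returns "> a\n\n> b\n" -- the default
# predicate leaves the blank line unprefixed.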
| scikit-build/scikit-ci | ci/utils.py | Python | apache-2.0 | 1,327 |
#
# $LicenseInfo:firstyear=2010&license=mit$
#
# Copyright (c) 2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
#
'''
This module contains the main Apiary code. The BeeKeeper spawns and manages
all subprocesses. The QueenBee reads jobs from disk and enqueues them for
workers. WorkerBee is a class to be extended by protocol-specific classes. It
contains the basic workings of a worker client to make it simple to implement a
new protocol. The StatsGatherer receives, aggregates, and prints status
messages from the WorkerBees.
'''
import optparse
import os
import re
import random
import socket
import sys
import tempfile
import cPickle
import MySQLdb
import time
import warnings
from datetime import datetime
from threading import Thread
from multiprocessing import Value, Queue
from multiprocessing.queues import Empty
from collections import defaultdict
from itertools import chain
from apiary.tools.childprocess import ChildProcess
from apiary.tools.debug import debug, traced_func, traced_method
from apiary.tools.stats import Tally, Level, Series
from apiary.tools.table import format_table, ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT
verbose = False
class BeeKeeper(object):
"""Manages the hive, including QueenBee, WorkerBees, and StatsGatherer."""
def __init__(self, options, arguments):
self.options = options
self.arguments = arguments
try:
self.protocol = options.protocols[options.protocol]
except KeyError:
sys.exit('invalid protocol: %s (valid protocols: %s)' %
(options.protocol, " ".join(options.protocols)))
def start(self):
"""Run the load test."""
start_time = time.time()
job_queue = Queue()
stats_queue = Queue()
workers = []
delay = self.options.stagger_workers / 1000.0
for i in xrange(self.options.workers):
worker = WorkerBeeProcess(self.options, self.protocol, job_queue, stats_queue)
worker.start()
workers.append(worker)
time.sleep(delay)
# TODO: consider waiting until workers are ready
#if self.options.startup_wait:
# print "workers started; waiting %d seconds..." % self.options.startup_wait
# time.sleep(self.options.startup_wait)
stats_gatherer = StatsGatherer(self.options, stats_queue)
stats_gatherer.start()
queen = QueenBee(self.options, self.arguments, job_queue, stats_queue)
queen.start()
# Now wait while the queen does its thing.
try:
queen.join()
except KeyboardInterrupt:
print "Interrupted, shutting down..."
queen.terminate()
print "Waiting for workers to complete jobs and terminate (may take up to %d seconds)..." % self.options.max_ahead
try:
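            # Each worker thread consumes exactly one STOP message, so put
            # one on the job queue per thread.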
stop = Message(Message.STOP)
for worker in xrange(self.options.workers * self.options.threads):
job_queue.put(stop)
# Now wait for the workers to get the message. This may take a few
# minutes as the QueenBee likes to stay ahead by a bit.
for worker in workers:
worker.join()
# Tell the Stats Gatherer that it's done.
stats_queue.put(Message(Message.STOP))
# Wait for it to finish.
stats_gatherer.join()
print "Completed %d jobs in %0.2f seconds." % (queen.jobs_sent.value, time.time() - start_time)
except KeyboardInterrupt:
print "Interrupted before shutdown process completed."
job_queue.cancel_join_thread()
stats_queue.cancel_join_thread()
class StatsGatherer(ChildProcess):
"""Gather and present stats.
The StatsGatherer process reads messages off of the stats queue sent by the
workers and aggregates them. It prints a report periodically.
See tools.stats for a description of the kinds of statistics that are
available.
"""
def __init__(self, options, stats_queue):
super(StatsGatherer, self).__init__()
self._options = options
self._verbose = options.verbose
self._tallies = defaultdict(Tally)
self._levels = defaultdict(Level)
self._series = defaultdict(Series)
self._last_report = time.time()
self._worker_count = 0
self._queue = stats_queue
@traced_method
def worker_status(self, message):
if message.type == Message.STOP:
debug('Stopping stats gatherer.')
return True
elif message.type == Message.STAT_TALLY:
#print "tally", message.body
self._tallies[message.body].add()
elif message.type == Message.STAT_LEVEL:
#print "level", message.body[0], message.body[1]
self._levels[message.body[0]].add(message.body[1])
elif message.type == Message.STAT_SERIES:
#print "series", message.body[0], message.body[1]
self._series[message.body[0]].add(message.body[1])
else:
print >> sys.stderr, "Received unknown worker status: %s" % message
def report(self):
self._last_report = time.time()
timestamp = datetime.now().strftime('%F %T')
print
print timestamp
print "=" * len(timestamp)
table = []
for name, stat in (sorted(self._tallies.items()) +
sorted(self._levels.iteritems()) +
sorted(self._series.iteritems())):
report = stat.report()
if report:
row = [(ALIGN_RIGHT, "%s: " % name)]
row.extend(report)
table.append(row)
print format_table(table) or "",
def run_child_process(self):
while True:
try:
done = self.worker_status(self._queue.get(timeout=1))
except Empty:
done = False
if done or time.time() - self._last_report > self._options.stats_interval:
self.report()
if done:
break
class QueenBee(ChildProcess):
"""A QueenBee process that distributes sequences of events"""
def __init__(self, options, arguments, job_queue, stats_queue):
super(QueenBee, self).__init__()
self._options = options
self._verbose = options.verbose
self._jobs_file = arguments[0]
self._index_file = arguments[0] + ".index"
self._time_scale = 1.0 / options.speedup
self._last_warning = 0
self._skip_counter = options.skip
self._last_job_start_time = 0
self._skip = options.skip
self.jobs_sent = Value('L', 0)
self.job_queue = job_queue
self.stats_queue = stats_queue
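        # A pre-built index file holds (job_id, start_time, offset) tuples,
        # letting the queen enqueue jobs without unpickling each one; fall
        # back to scanning the jobs file itself when no index exists.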
if os.path.exists(self._index_file):
self.use_index = True
else:
self.use_index = False
def run_child_process(self):
start_time = time.time() + self._options.startup_wait
if self.use_index:
jobs_file = open(self._index_file, 'rb')
else:
jobs_file = open(self._jobs_file, 'rb')
job_num = 0
while True:
try:
if self.use_index:
job_id, job_start_time, job_offset = cPickle.load(jobs_file)
else:
job_offset = jobs_file.tell()
job_id, tasks = cPickle.load(jobs_file)
if not tasks:
continue
job_start_time = tasks[0][0]
job_num += 1
if self._options.ramp_time:
# Adjust skip counter once per second since ramp_time has
# one-second resolution anyway.
job_start_second = int(job_start_time)
if job_start_second > self._last_job_start_time:
self._skip = max(self._options.min_skip,
self._options.skip - (job_start_second / self._options.ramp_time))
self._last_job_start_time = job_start_second
if self._skip:
if self._skip_counter == 0:
self._skip_counter = self._skip
else:
self._skip_counter -= 1
if self._skip_counter != self._options.offset:
continue
# Check whether we're falling behind, and throttle sending so as
# not to overfill the queue.
if not self._options.asap:
offset = job_start_time * self._time_scale - (time.time() - start_time)
if offset > self._options.max_ahead:
time.sleep(offset - self._options.max_ahead)
elif offset < -10.0:
if time.time() - self._last_warning > 60:
print "WARNING: Queenbee is %0.2f seconds behind." % (-offset)
self._last_warning = time.time()
message = Message(Message.JOB, (start_time, job_id, self._jobs_file, job_offset))
self.job_queue.put(message)
except EOFError:
break
self.jobs_sent.value = job_num
class WorkerBee(Thread):
"""The thread that does the actual job processing"""
EXCHANGE = 'b.direct'
def __init__(self, options, job_queue, stats_queue):
super(WorkerBee, self).__init__()
self.options = options
self.job_queue = job_queue
self.stats_queue = stats_queue
self.dry_run = options.dry_run
self.asap = options.asap
self.verbose = options.verbose >= 1
self.debug = options.debug
self.time_scale = 1.0 / options.speedup
def status(self, status, body=None):
self.stats_queue.put(Message(status, body))
def error(self, message):
self.status(Message.STAT_TALLY, "ERR: <%s>" % message)
def tally(self, name):
self.status(Message.STAT_TALLY, name)
def level(self, name, increment):
self.status(Message.STAT_LEVEL, (name, increment))
def series(self, name, value):
self.status(Message.STAT_SERIES, (name, value))
def process_message(self, message):
if message.type == Message.STOP:
return True
elif message.type == Message.JOB:
# Messages look like this:
# (start_time, job_id, job_file, offset)
start_time, job_id, job_file, offset = message.body
with open(job_file) as f:
f.seek(offset)
# Jobs look like this:
# (job_id, ((time, request), (time, request), ...))
read_job_id, tasks = cPickle.load(f)
if job_id != read_job_id:
print "ERROR: worker read the wrong job: expected %s, read %s" % (job_id, read_job_id)
return
if self.dry_run or not tasks:
return
started = False
error = False
for timestamp, request in tasks:
target_time = timestamp * self.time_scale + start_time
offset = target_time - time.time()
# TODO: warn if falling behind?
if offset > 0:
#print('sleeping %0.4f seconds' % offset)
debug('sleeping %0.4f seconds' % offset)
if offset > 120 and self.verbose:
print "long wait of %ds for job %s" % (offset, job_id)
time.sleep(offset)
#elif offset < -1:
# print "worker fell behind by %.5f seconds" % (-offset)
if not started:
self.level("Jobs Running", "+")
self.start_job(job_id)
started = True
#print "sending request", request
self.level("Requests Running", "+")
request_start_time = time.time()
error = not self.send_request(request)
request_end_time = time.time()
self.level("Requests Running", "-")
self.tally("Requests Completed")
self.series("Request Duration (ms)", (request_end_time - request_start_time) * 1000)
if error:
break
self.finish_job(job_id)
self.level("Jobs Running", "-")
if not error:
self.tally("Jobs Completed")
def start_job(self, job_id):
pass
def send_request(self, request):
raise NotImplementedError('protocol plugin must implement send_request()')
def finish_request(self, request):
        raise NotImplementedError('protocol plugin must implement finish_request()')
def finish_job(self, job_id):
pass
def run(self):
while True:
done = self.process_message(self.job_queue.get())
if done:
break
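# Illustrative sketch (not part of the original module): a minimal protocol
# plugin module exposes a WorkerBee subclass that implements send_request()
# (and finish_request() if it keeps per-request state). BeeKeeper resolves
# plugins through options.protocols and WorkerBeeProcess instantiates the
# plugin's WorkerBee attribute, so a do-nothing "null" protocol could look
# like this; the request payload is whatever the capture tool recorded.
class NullWorkerBee(WorkerBee):
    def send_request(self, request):
        # A real plugin would replay the request against a server here and
        # return False on failure; True reports success to process_message().
        return True
    def finish_request(self, request):
        pass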
class WorkerBeeProcess(ChildProcess):
"""Manages the set of WorkerBee threads"""
def __init__(self, options, protocol, job_queue, stats_queue):
super(WorkerBeeProcess, self).__init__()
self.options = options
self.protocol = protocol
self.threads = []
self.job_queue = job_queue
self.stats_queue = stats_queue
def run_child_process(self):
delay = self.options.stagger_threads / 1000.0
for i in xrange(self.options.threads):
thread = self.protocol.WorkerBee(self.options, self.job_queue, self.stats_queue)
thread.setDaemon(True)
thread.start()
self.threads.append(thread)
time.sleep(delay)
debug("spawned %d threads" % len(self.threads))
for thread in self.threads:
thread.join()
debug('worker ended')
class Message (object):
STOP = 3
JOB = 8
STAT_TALLY = 9
STAT_LEVEL = 10
STAT_SERIES = 11
def __init__(self, type, body=None):
self.type = type
self.body = body
def __str__(self):
return repr(self)
def __repr__(self):
return "Message(%s, %s)" % (self.type, repr(self.body))
def add_options(parser):
parser.add_option('-v', '--verbose',
default=0, action='count',
                      help='increase output (0~2 times)')
parser.add_option('-p', '--protocol', default='mysql',
help='''Protocol plugin to use (default: mysql).''')
parser.add_option('--profile', default=False, action='store_true',
help='Print profiling data. This will impact performance.')
parser.add_option('--debug', default=False, action='store_true', dest='debug',
help='Print debug messages.')
parser.add_option('--asap',
action='store_true', default=False,
help='send queries as fast as possible (default: off)')
parser.add_option('-w', '--workers', metavar='N',
default=100, type='int',
help='number of worker bee processes (default: 100)')
parser.add_option('--stagger-workers', metavar='MSEC',
default=0, type='int',
help='number of milliseconds to wait between starting workers (default: 0)')
parser.add_option('-t', '--threads', metavar='N',
default=1, type='int',
help='number of threads per worker process (default: 1)')
parser.add_option('--stagger-threads', metavar='MSEC',
default=0, type='int',
                      help='number of milliseconds to wait between starting threads (default: 0)')
parser.add_option('--startup-wait', metavar='SEC',
default=0, type='int',
help='number of seconds to wait after starting all workers before enqueuing jobs (default: 0)')
parser.add_option('--speedup', default=1.0, dest='speedup', type='float',
help="Time multiple used when replaying query logs. 2.0 means "
"that queries run twice as fast (and the entire run takes "
"half the time the capture ran for).")
parser.add_option('--skip', default=0, type='int', metavar='NUM',
help='''Skip this many jobs before running a job. For example,
a value of 31 would skip 31 jobs, run one, skip 31, etc, so
1 out of every 32 jobs would be run.''')
parser.add_option('--ramp-time', default=0, type='int', metavar='SECONDS',
help='''After this number of seconds, decrement the --skip
by 1. Continue in this way until --skip reaches
                      --min-skip.''')
parser.add_option('--min-skip', default=0, type='int', metavar='NUM',
help='''Lower bound on --skip when using --ramp-time.''')
parser.add_option('--offset', default=0, type='int', metavar='NUM',
help='''When skipping jobs, this chooses which ones to run. For
example, with a skip of 1, you could run apiary on two
hosts, one with an offset of 0 and one with an offset of
1 to run all jobs.''')
parser.add_option('--max-ahead', default=300, type='int', metavar='SECONDS',
help='''How many seconds ahead the QueenBee may get in sending
                      jobs to the queue. Only change this if apiary consumes too
                      much memory.''')
parser.add_option('-n', '--dry-run', default=False, action='store_true',
help='''Don't actually send any requests.''')
parser.add_option('-i', '--stats-interval', type=int, default=15, metavar='SECONDS',
help='''How often to report statistics, in seconds. (default: %default)''')
| lexelby/apiary | apiary/base.py | Python | mit | 19,231 |
"""
Django settings for nostril project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8-n#bx+ebbe$b(baal0ql7@oj47ri=7h0za^-qngv-#_%+*+%u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DEFAULT_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRDPARTY_APPS = ()
LOCAL_APPS = ()
INSTALLED_APPS = DEFAULT_APPS + THIRDPARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'nostril.urls'
WSGI_APPLICATION = 'nostril.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| bbiskup/nostril | nostril/settings.py | Python | mit | 2,075 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scattermapbox.hoverlabel.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/scattermapbox/hoverlabel/font/_color.py | Python | mit | 522 |
def knapsack():
    # Build a superincreasing sequence. tmp is never reset, so (after the
    # first step) each appended value works out to the sum of all elements
    # so far plus 5 -- strictly greater than the sum of everything before it.
    knapsack = [1]
    tmp = 0
    for i in range(100):
        for j in range(i):
            tmp += knapsack[j]
        tmp += 5
        knapsack.append(tmp)
    return knapsack
def ciphergen():
    # Knapsack encryption: read the plaintext bit string and sum the
    # sequence elements at the positions of the '1' bits.
    f = open("a.txt", "r")
    ptxt = f.read()
    f.close()
    ctxt = 0
    supsack = knapsack()
    # Debug print of the generated sequence.
    for i in range(len(supsack)):
        print supsack[i]
    for i in range(len(ptxt)):
        if ptxt[i] == '1':
            ctxt += supsack[i]
    return ctxt
ciphertxt = ciphergen()
print ciphertxt
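# Illustrative sketch (not part of the original challenge): because the
# sequence is superincreasing, the bit string can be recovered from the
# ciphertext greedily, walking the sequence from largest to smallest.
def greedy_decode(ctxt, sack):
    bits = ['0'] * len(sack)
    for i in reversed(range(len(sack))):
        if sack[i] <= ctxt:
            bits[i] = '1'
            ctxt -= sack[i]
    return ''.join(bits)  # may include trailing '0's past the plaintext length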
| L34p/HeXA-CTF-2015 | challenges/Crypto/Crypto_300/knapsack.py | Python | mit | 504 |
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from freezegun import freeze_time
from topydo.commands.TagCommand import TagCommand
from topydo.lib.TodoList import TodoList
from .command_testcase import CommandTest
class TagCommandTest(CommandTest):
def setUp(self):
super().setUp()
todos = [
"Foo",
"Bar due:2014-10-22",
"Baz due:2014-10-20",
"Fnord due:2014-10-20 due:2014-10-22",
]
self.todolist = TodoList(todos)
def test_add_tag1(self):
command = TagCommand(["1", "due", "2014-10-22"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(), "Foo due:2014-10-22")
self.assertEqual(self.output, "| 1| Foo due:2014-10-22\n")
self.assertEqual(self.errors, "")
self.assertTrue(self.todolist.dirty)
def test_add_tag2(self):
command = TagCommand(["Foo", "due", "2014-10-22"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(1).source(), "Foo due:2014-10-22")
self.assertEqual(self.output, "| 1| Foo due:2014-10-22\n")
self.assertEqual(self.errors, "")
self.assertTrue(self.todolist.dirty)
def test_add_tag3(self):
command = TagCommand(["-a", "2", "due", "2014-10-19"], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.todolist.todo(2).source(),
"Bar due:2014-10-22 due:2014-10-19")
self.assertEqual(self.output,
"| 2| Bar due:2014-10-22 due:2014-10-19\n")
self.assertEqual(self.errors, "")
self.assertTrue(self.todolist.dirty)
def test_add_tag4(self):
command = TagCommand(["Foox", "due", "2014-10-22"], self.todolist,
self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertFalse(self.output)
self.assertEqual(self.errors, "Invalid todo number.\n")
def test_force_add_tag01(self):
        '''Adds three different values to the same tag name.'''
for letter in ['a', 'b', 'c']:
command = TagCommand(['-a', '1', 'k', letter], self.todolist,
self.out, self.error)
command.execute()
self.assertEqual(self.errors, "")
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.todolist.todo(1).source(), "Foo k:a k:b k:c")
def test_set_tag04(self):
command = TagCommand(["3", "due", "2014-10-20"], self.todolist,
self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, "| 3| Baz due:2014-10-20\n")
self.assertEqual(self.errors, "")
def test_set_tag05(self):
command = TagCommand(["4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "all")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-20\n")
self.assertEqual(self.errors, "")
def test_set_tag06(self):
command = TagCommand(["4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "1")
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_set_tag07(self):
command = TagCommand(["4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "2")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-20\n")
self.assertEqual(self.errors, "")
def test_set_tag08(self):
command = TagCommand(["4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "")
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_set_tag09(self):
command = TagCommand(["4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "99")
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_set_tag10(self):
command = TagCommand(["-f", "4", "due", "2014-10-20"], self.todolist,
self.out, self.error, lambda t: "99")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output,
"| 4| Fnord due:2014-10-20 due:2014-10-20\n")
self.assertEqual(self.errors, "")
@freeze_time('2015, 11, 19')
def test_set_tag11(self):
command = TagCommand(["3", "due", "today"], self.todolist, self.out,
self.error)
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 3| Baz due:2015-11-19\n")
self.assertEqual(self.errors, "")
def test_set_tag12(self):
"""
Do not convert relative dates for tags that were not configured as
start/due date.
"""
command = TagCommand(["3", "foo", "today"], self.todolist, self.out,
self.error)
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 3| Baz due:2014-10-20 foo:today\n")
self.assertEqual(self.errors, "")
@freeze_time('2017, 1, 12')
def test_set_tag13(self):
"""
Convert relative dates when forced to.
"""
command = TagCommand(["-r", "3", "foo", "today"], self.todolist,
self.out, self.error)
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 3| Baz due:2014-10-20 foo:2017-01-12\n")
self.assertEqual(self.errors, "")
def test_set_tag14(self):
"""
Leave the original value when an invalid relative date was given.
"""
command = TagCommand(["-r", "3", "foo", "bar"], self.todolist,
self.out, self.error)
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 3| Baz due:2014-10-20 foo:bar\n")
self.assertEqual(self.errors, "")
def test_rm_tag01(self):
command = TagCommand(["1", "due"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, "| 1| Foo\n")
self.assertEqual(self.errors, "")
def test_rm_tag02(self):
command = TagCommand(["2", "due"], self.todolist, self.out, self.error)
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 2| Bar\n")
self.assertEqual(self.errors, "")
def test_rm_tag03(self):
command = TagCommand(["4", "due"], self.todolist, self.out,
self.error, lambda t: "all")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output,
" 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord\n")
self.assertEqual(self.errors, "")
def test_rm_tag04(self):
command = TagCommand(["4", "due"], self.todolist, self.out, self.error,
lambda t: "1")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_rm_tag06(self):
command = TagCommand(["4", "due"], self.todolist, self.out, self.error,
lambda t: "99")
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_rm_tag07(self):
command = TagCommand(["4", "due"], self.todolist, self.out, self.error,
lambda t: "A")
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, " 1. 2014-10-20\n 2. 2014-10-22\n| 4| Fnord due:2014-10-20 due:2014-10-22\n")
self.assertEqual(self.errors, "")
def test_rm_tag08(self):
command = TagCommand(["5", "due"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number.\n")
def test_rm_tag09(self):
command = TagCommand(["A", "due"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number.\n")
def test_rm_tag10(self):
command = TagCommand(["-f", "4", "due"], self.todolist, self.out,
self.error, lambda t: "A")
command.execute()
self.assertTrue(self.todolist.dirty)
self.assertEqual(self.output, "| 4| Fnord\n")
self.assertEqual(self.errors, "")
def test_no_tag(self):
command = TagCommand(["4"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.dirty)
self.assertEqual(self.output, "")
self.assertEqual(self.errors, command.usage() + "\n")
def test_tag_name(self):
name = TagCommand.name()
self.assertEqual(name, 'tag')
def test_help(self):
command = TagCommand(["help"], self.todolist, self.out, self.error)
command.execute()
self.assertEqual(self.output, "")
self.assertEqual(self.errors,
command.usage() + "\n\n" + command.help() + "\n")
if __name__ == '__main__':
unittest.main()
| bram85/topydo | test/test_tag_command.py | Python | gpl-3.0 | 11,537 |
"""Support for Hydrawise cloud switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import (
ALLOWED_WATERING_TIME,
CONF_WATERING_TIME,
DATA_HYDRAWISE,
DEFAULT_WATERING_TIME,
SWITCHES,
HydrawiseEntity,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=SWITCHES): vol.All(
cv.ensure_list, [vol.In(SWITCHES)]
),
vol.Optional(CONF_WATERING_TIME, default=DEFAULT_WATERING_TIME): vol.All(
vol.In(ALLOWED_WATERING_TIME)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Hydrawise device."""
hydrawise = hass.data[DATA_HYDRAWISE].data
default_watering_timer = config.get(CONF_WATERING_TIME)
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
# Create a switch for each zone
for zone in hydrawise.relays:
sensors.append(HydrawiseSwitch(default_watering_timer, zone, sensor_type))
add_entities(sensors, True)
class HydrawiseSwitch(HydrawiseEntity, SwitchEntity):
"""A switch implementation for Hydrawise device."""
def __init__(self, default_watering_timer, *args):
"""Initialize a switch for Hydrawise device."""
super().__init__(*args)
self._default_watering_timer = default_watering_timer
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
relay_data = self.data["relay"] - 1
if self._sensor_type == "manual_watering":
self.hass.data[DATA_HYDRAWISE].data.run_zone(
self._default_watering_timer, relay_data
)
elif self._sensor_type == "auto_watering":
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(0, relay_data)
def turn_off(self, **kwargs):
"""Turn the device off."""
relay_data = self.data["relay"] - 1
if self._sensor_type == "manual_watering":
self.hass.data[DATA_HYDRAWISE].data.run_zone(0, relay_data)
elif self._sensor_type == "auto_watering":
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(365, relay_data)
def update(self):
"""Update device state."""
relay_data = self.data["relay"] - 1
mydata = self.hass.data[DATA_HYDRAWISE].data
_LOGGER.debug("Updating Hydrawise switch: %s", self._name)
if self._sensor_type == "manual_watering":
self._state = mydata.relays[relay_data]["timestr"] == "Now"
elif self._sensor_type == "auto_watering":
self._state = (mydata.relays[relay_data]["timestr"] != "") and (
mydata.relays[relay_data]["timestr"] != "Now"
)
| nkgilley/home-assistant | homeassistant/components/hydrawise/switch.py | Python | apache-2.0 | 3,053 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test starting qutebrowser with special arguments/environments."""
import subprocess
import socket
import sys
import logging
import re
import pytest
from PyQt5.QtCore import QProcess
def _base_args(config):
"""Get the arguments to pass with every invocation."""
args = ['--debug', '--json-logging', '--no-err-windows']
if config.webengine:
args += ['--backend', 'webengine']
else:
args += ['--backend', 'webkit']
args.append('about:blank')
return args
@pytest.fixture
def temp_basedir_env(tmpdir, short_tmpdir):
"""Return a dict of environment variables that fakes --temp-basedir.
We can't run --basedir or --temp-basedir for some tests, so we mess with
XDG_*_DIR to get things relocated.
"""
data_dir = tmpdir / 'data'
config_dir = tmpdir / 'config'
runtime_dir = short_tmpdir / 'rt'
cache_dir = tmpdir / 'cache'
runtime_dir.ensure(dir=True)
runtime_dir.chmod(0o700)
(data_dir / 'qutebrowser' / 'state').write_text(
'[general]\nquickstart-done = 1\nbackend-warning-shown=1',
encoding='utf-8', ensure=True)
env = {
'XDG_DATA_HOME': str(data_dir),
'XDG_CONFIG_HOME': str(config_dir),
'XDG_RUNTIME_DIR': str(runtime_dir),
'XDG_CACHE_HOME': str(cache_dir),
}
return env
@pytest.mark.linux
def test_downloads_with_ascii_locale(request, server, tmpdir, quteproc_new):
"""Test downloads with LC_ALL=C set.
https://github.com/qutebrowser/qutebrowser/issues/908
https://github.com/qutebrowser/qutebrowser/issues/1726
"""
args = ['--temp-basedir'] + _base_args(request.config)
quteproc_new.start(args, env={'LC_ALL': 'C'})
quteproc_new.set_setting('downloads.location.directory', str(tmpdir))
# Test a normal download
quteproc_new.set_setting('downloads.location.prompt', 'false')
url = 'http://localhost:{port}/data/downloads/ä-issue908.bin'.format(
port=server.port)
quteproc_new.send_cmd(':download {}'.format(url))
quteproc_new.wait_for(category='downloads',
message='Download ?-issue908.bin finished')
# Test :prompt-open-download
quteproc_new.set_setting('downloads.location.prompt', 'true')
quteproc_new.send_cmd(':download {}'.format(url))
quteproc_new.send_cmd(':prompt-open-download "{}" -c pass'
.format(sys.executable))
quteproc_new.wait_for(category='downloads',
message='Download ä-issue908.bin finished')
quteproc_new.wait_for(category='misc',
message='Opening * with [*python*]')
assert len(tmpdir.listdir()) == 1
assert (tmpdir / '?-issue908.bin').exists()
@pytest.mark.linux
@pytest.mark.parametrize('url', ['/föö.html', 'file:///föö.html'])
def test_open_with_ascii_locale(request, server, tmpdir, quteproc_new, url):
"""Test opening non-ascii URL with LC_ALL=C set.
https://github.com/qutebrowser/qutebrowser/issues/1450
"""
args = ['--temp-basedir'] + _base_args(request.config)
quteproc_new.start(args, env={'LC_ALL': 'C'})
quteproc_new.set_setting('url.auto_search', 'never')
# Test opening a file whose name contains non-ascii characters.
# No exception thrown means test success.
quteproc_new.send_cmd(':open {}'.format(url))
if not request.config.webengine:
line = quteproc_new.wait_for(message="Error while loading *: Error "
"opening /*: No such file or directory")
line.expected = True
quteproc_new.wait_for(message="load status for <* tab_id=* "
"url='*/f%C3%B6%C3%B6.html'>: LoadStatus.error")
@pytest.mark.linux
def test_open_command_line_with_ascii_locale(request, server, tmpdir,
quteproc_new):
"""Test opening file via command line with a non-ascii name with LC_ALL=C.
https://github.com/qutebrowser/qutebrowser/issues/1450
"""
# The file does not actually have to exist because the relevant checks will
# all be called. No exception thrown means test success.
args = (['--temp-basedir'] + _base_args(request.config) +
['/home/user/föö.html'])
quteproc_new.start(args, env={'LC_ALL': 'C'}, wait_focus=False)
if not request.config.webengine:
line = quteproc_new.wait_for(message="Error while loading *: Error "
"opening /*: No such file or directory")
line.expected = True
quteproc_new.wait_for(message="load status for <* tab_id=* "
"url='*/f*.html'>: LoadStatus.error")
@pytest.mark.linux
def test_misconfigured_user_dirs(request, server, temp_basedir_env,
tmpdir, quteproc_new):
"""Test downloads with a misconfigured XDG_DOWNLOAD_DIR.
https://github.com/qutebrowser/qutebrowser/issues/866
https://github.com/qutebrowser/qutebrowser/issues/1269
"""
home = tmpdir / 'home'
home.ensure(dir=True)
temp_basedir_env['HOME'] = str(home)
assert temp_basedir_env['XDG_CONFIG_HOME'] == tmpdir / 'config'
(tmpdir / 'config' / 'user-dirs.dirs').write('XDG_DOWNLOAD_DIR="relative"',
ensure=True)
quteproc_new.start(_base_args(request.config), env=temp_basedir_env)
quteproc_new.set_setting('downloads.location.prompt', 'false')
url = 'http://localhost:{port}/data/downloads/download.bin'.format(
port=server.port)
quteproc_new.send_cmd(':download {}'.format(url))
line = quteproc_new.wait_for(
loglevel=logging.ERROR, category='message',
message='XDG_DOWNLOAD_DIR points to a relative path - please check '
'your ~/.config/user-dirs.dirs. The download is saved in your '
'home directory.')
line.expected = True
quteproc_new.wait_for(category='downloads',
message='Download download.bin finished')
assert (home / 'download.bin').exists()
def test_no_loglines(request, quteproc_new):
"""Test qute://log with --loglines=0."""
quteproc_new.start(args=['--temp-basedir', '--loglines=0'] +
_base_args(request.config))
quteproc_new.open_path('qute://log')
assert quteproc_new.get_content() == 'Log output was disabled.'
@pytest.mark.not_frozen
@pytest.mark.parametrize('level', ['1', '2'])
def test_optimize(request, quteproc_new, capfd, level):
quteproc_new.start(args=['--temp-basedir'] + _base_args(request.config),
env={'PYTHONOPTIMIZE': level})
if level == '2':
msg = ("Running on optimize level higher than 1, unexpected behavior "
"may occur.")
line = quteproc_new.wait_for(message=msg)
line.expected = True
# Waiting for quit to make sure no other warning is emitted
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
@pytest.mark.not_frozen
@pytest.mark.flaky # Fails sometimes with empty output...
def test_version(request):
"""Test invocation with --version argument."""
args = ['-m', 'qutebrowser', '--version'] + _base_args(request.config)
# can't use quteproc_new here because it's confused by
# early process termination
proc = QProcess()
proc.setProcessChannelMode(QProcess.SeparateChannels)
proc.start(sys.executable, args)
ok = proc.waitForStarted(2000)
assert ok
ok = proc.waitForFinished(10000)
stdout = bytes(proc.readAllStandardOutput()).decode('utf-8')
print(stdout)
stderr = bytes(proc.readAllStandardError()).decode('utf-8')
print(stderr)
assert ok
assert proc.exitStatus() == QProcess.NormalExit
assert re.search(r'^qutebrowser\s+v\d+(\.\d+)', stdout) is not None
def test_qt_arg(request, quteproc_new, tmpdir):
"""Test --qt-arg."""
args = (['--temp-basedir', '--qt-arg', 'stylesheet',
str(tmpdir / 'does-not-exist')] + _base_args(request.config))
quteproc_new.start(args)
msg = 'QCss::Parser - Failed to load file "*does-not-exist"'
line = quteproc_new.wait_for(message=msg)
line.expected = True
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_webengine_inspector(request, quteproc_new):
if not request.config.webengine:
pytest.skip()
args = (['--temp-basedir', '--enable-webengine-inspector'] +
_base_args(request.config))
quteproc_new.start(args)
line = quteproc_new.wait_for(
message='Remote debugging server started successfully. Try pointing a '
'Chromium-based browser to http://127.0.0.1:*')
port = int(line.message.split(':')[-1])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
s.close()
@pytest.mark.linux
def test_webengine_download_suffix(request, quteproc_new, tmpdir):
"""Make sure QtWebEngine does not add a suffix to downloads."""
if not request.config.webengine:
pytest.skip()
download_dir = tmpdir / 'downloads'
download_dir.ensure(dir=True)
(tmpdir / 'user-dirs.dirs').write(
'XDG_DOWNLOAD_DIR={}'.format(download_dir))
env = {'XDG_CONFIG_HOME': str(tmpdir)}
args = (['--temp-basedir'] + _base_args(request.config))
quteproc_new.start(args, env=env)
quteproc_new.set_setting('downloads.location.prompt', 'false')
quteproc_new.set_setting('downloads.location.directory', str(download_dir))
quteproc_new.open_path('data/downloads/download.bin', wait=False)
quteproc_new.wait_for(category='downloads', message='Download * finished')
quteproc_new.open_path('data/downloads/download.bin', wait=False)
quteproc_new.wait_for(message='Entering mode KeyMode.yesno *')
quteproc_new.send_cmd(':prompt-accept yes')
quteproc_new.wait_for(category='downloads', message='Download * finished')
files = download_dir.listdir()
assert len(files) == 1
assert files[0].basename == 'download.bin'
def test_command_on_start(request, quteproc_new):
"""Make sure passing a command on start works.
See https://github.com/qutebrowser/qutebrowser/issues/2408
"""
args = (['--temp-basedir'] + _base_args(request.config) +
[':quickmark-add https://www.example.com/ example'])
quteproc_new.start(args)
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_launching_with_python2():
try:
proc = subprocess.run(['python2', '-m', 'qutebrowser',
'--no-err-windows'], stderr=subprocess.PIPE)
except FileNotFoundError:
pytest.skip("python2 not found")
assert proc.returncode == 1
error = "At least Python 3.5 is required to run qutebrowser"
assert proc.stderr.decode('ascii').startswith(error)
def test_initial_private_browsing(request, quteproc_new):
"""Make sure the initial window is private when the setting is set."""
args = (_base_args(request.config) +
['--temp-basedir', '-s', 'content.private_browsing', 'true'])
quteproc_new.start(args)
quteproc_new.compare_session("""
windows:
- private: True
tabs:
- history:
- url: about:blank
""")
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_loading_empty_session(tmpdir, request, quteproc_new):
"""Make sure loading an empty session opens a window."""
session = tmpdir / 'session.yml'
session.write('windows: []')
args = _base_args(request.config) + ['--temp-basedir', '-r', str(session)]
quteproc_new.start(args)
quteproc_new.compare_session("""
windows:
- tabs:
- history:
- url: about:blank
""")
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_qute_settings_persistence(short_tmpdir, request, quteproc_new):
"""Make sure settings from qute://settings are persistent."""
args = _base_args(request.config) + ['--basedir', str(short_tmpdir)]
quteproc_new.start(args)
quteproc_new.open_path(
'qute://settings/set?option=ignore_case&value=always')
assert quteproc_new.get_setting('ignore_case') == 'always'
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
quteproc_new.start(args)
assert quteproc_new.get_setting('ignore_case') == 'always'
@pytest.mark.no_xvfb
@pytest.mark.no_ci
def test_force_software_rendering(request, quteproc_new):
"""Make sure we can force software rendering with -s."""
if not request.config.webengine:
pytest.skip("Only runs with QtWebEngine")
args = (_base_args(request.config) +
['--temp-basedir', '-s', 'qt.force_software_rendering', 'true'])
quteproc_new.start(args)
quteproc_new.open_path('chrome://gpu')
message = 'Canvas: Software only, hardware acceleration unavailable'
assert message in quteproc_new.get_content()
| NoctuaNivalis/qutebrowser | tests/end2end/test_invocations.py | Python | gpl-3.0 | 13,799 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_status"""
import setuptools # type: ignore
setuptools.setup() # Package definition in setup.cfg
| google/pigweed | pw_status/py/setup.py | Python | apache-2.0 | 690 |
"""Intesishome platform."""
| jawilson/home-assistant | homeassistant/components/intesishome/__init__.py | Python | apache-2.0 | 28 |
import json
import os
import praw
import requests
dirName = os.path.dirname(os.path.abspath(__file__))
fileName = os.path.join(dirName, '../config.json')
config = {}
with open(fileName) as f:
config = json.loads(f.read())
subredditName = config['reddit']['subreddit']
redditAuth = config['reddit']['auth']
port = config['api']['port']
url = 'http://localhost:{}/leaderboard'.format(port)
request = requests.get(url)
if request.status_code != 200:
raise Exception('Unexpected status code from PG API: {}'.format(request.status_code))
leaderboard = json.loads(request.text)['leaderboard']
reddit = praw.Reddit(
user_agent = redditAuth['userAgent'],
client_id = redditAuth['clientId'],
client_secret = redditAuth['clientSecret'],
username = redditAuth['username'],
password = redditAuth['password'])
subreddit = reddit.subreddit(subredditName)
wikiPage = subreddit.wiki['leaderboard']
outputString = '# Leaderboard\n\n' + \
'Rank | Username | Rounds won | Total |\n' + \
    '|:--:|:--:|:--|:--:|\n'
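# One table row per player; roundList holds the numbers of the rounds won.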
for player in leaderboard:
outputString += '{} | {} | {} | {}\n'.format(
player['rank'],
player['username'],
', '.join(str(num) for num in player['roundList']),
len(player['roundList']))
wikiPage.edit(outputString)
print('Done')
| hswhite33/picturegame-api | scripts/edit-leaderboard.py | Python | mit | 1,312 |
from typing import List
class Solution:
def transformArray2(self, arr: List[int]) -> List[int]:
while True:
arr2 = [a for a in arr]
changed = 0
            for idx in range(1, len(arr) - 1):
                l = arr[idx - 1]
                r = arr[idx + 1]
                m = arr[idx]
                if l > m and r > m:
                    m += 1
                    changed += 1
                elif l < m and r < m:
                    m -= 1
                    changed += 1
                arr2[idx] = m
arr = arr2
if changed == 0:
break
return arr
def transformArray(self, A):
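        # Booleans are ints in Python, so (a > b < c) adds 1 when b is a
        # local minimum and (a < b > c) subtracts 1 when b is a local
        # maximum; the first and last elements are carried over unchanged.
        # 100 passes is a safe convergence bound for the problem's
        # constraints, e.g. [1, 6, 3, 4, 3, 5] -> [1, 4, 4, 4, 4, 5].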
for _ in range(100):
A = A[:1] + [b + (a > b < c) - (a < b > c) for a, b, c in zip(A, A[1:], A[2:])] + A[-1:]
return A
if __name__ == '__main__':
assert Solution().transformArray([6, 2, 3, 4]) == [6, 3, 3, 4]
assert Solution().transformArray([1, 6, 3, 4, 3, 5]) == [1, 4, 4, 4, 4, 5]
| lmmsoft/LeetCode | LeetCode-Algorithm/1243. Array Transformation/1243.py | Python | gpl-2.0 | 992 |
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import unittest
from diamond.metric import Metric
class TestMetric(unittest.TestCase):
def testgetPathPrefix(self):
metric = Metric('servers.com.example.www.cpu.total.idle',
0,
host='com.example.www')
actual_value = metric.getPathPrefix()
expected_value = 'servers'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def testgetPathPrefixCustom(self):
metric = Metric('custom.path.prefix.com.example.www.cpu.total.idle',
0,
host='com.example.www')
actual_value = metric.getPathPrefix()
expected_value = 'custom.path.prefix'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def testgetCollectorPath(self):
metric = Metric('servers.com.example.www.cpu.total.idle',
0,
host='com.example.www')
actual_value = metric.getCollectorPath()
expected_value = 'cpu'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def testgetMetricPath(self):
metric = Metric('servers.com.example.www.cpu.total.idle',
0,
host='com.example.www')
actual_value = metric.getMetricPath()
expected_value = 'total.idle'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
# Test hostname of none
def testgetPathPrefixHostNone(self):
metric = Metric('servers.host.cpu.total.idle',
0)
actual_value = metric.getPathPrefix()
expected_value = 'servers'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def testgetCollectorPathHostNone(self):
metric = Metric('servers.host.cpu.total.idle',
0)
actual_value = metric.getCollectorPath()
expected_value = 'cpu'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def testgetMetricPathHostNone(self):
metric = Metric('servers.host.cpu.total.idle',
0)
actual_value = metric.getMetricPath()
expected_value = 'total.idle'
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def test_parse(self):
metric = Metric('test.parse', 0)
actual_value = str(metric).strip()
expected_value = str(Metric.parse(actual_value)).strip()
message = 'Actual %s, expected %s' % (actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
def test_issue_723(self):
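        # Regression test for issue #723: values supplied in scientific
        # notation should serialize the same as their decimal equivalents
        # (at the default precision they round to '0', never an exponent).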
metrics = [
9.97143369909e-05,
'9.97143369909e-05',
0.0000997143369909,
'0.0000997143369909',
]
for precision in xrange(0, 100):
for m in metrics:
metric = Metric('test.723', m, timestamp=0)
actual_value = str(metric).strip()
expected_value = 'test.723 0 0'
message = 'Actual %s, expected %s' % (actual_value,
expected_value)
self.assertEqual(actual_value, expected_value, message)
| Ormod/Diamond | src/diamond/test/testmetric.py | Python | mit | 3,866 |
from tests.local.base import StatsTest
class GaugeTestCase(StatsTest):
def test_gauge_works(self):
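        # Closure state lives in a mutable dict so the nested function can
        # update it without ``nonlocal`` (keeps the test Python 2 friendly).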
fugly_hack = {'i': 0}
def _gauge_value():
fugly_hack['i'] += 1
return fugly_hack['i']
self.registry.gauge('GaugeTestCase.test_gauge_works', _gauge_value)
stats = self.registry.get_stats()
assert stats.GaugeTestCase.test_gauge_works.value == 1
stats = self.registry.get_stats()
assert stats.GaugeTestCase.test_gauge_works.value == 2
| emilssolmanis/tapes | tests/local/test_gauge.py | Python | apache-2.0 | 521 |
from rest_framework import serializers
from mkt.constants.payments import PROVIDER_LOOKUP_INVERTED
from mkt.prices.models import Price, price_locale
class PriceSerializer(serializers.ModelSerializer):
prices = serializers.SerializerMethodField()
localized = serializers.SerializerMethodField('get_localized_prices')
pricePoint = serializers.CharField(source='name')
name = serializers.CharField(source='tier_name')
class Meta:
model = Price
def get_prices(self, obj):
provider = self.context['request'].GET.get('provider', None)
if provider:
provider = PROVIDER_LOOKUP_INVERTED[provider]
return obj.prices(provider=provider)
def get_localized_prices(self, obj):
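        # Return the price entry matching the request's region, augmented
        # with a locale-formatted price string and the region's display name.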
region = self.context['request'].REGION
for price in self.get_prices(obj):
if price['region'] == region.id:
result = price.copy()
result.update({
'locale': price_locale(price['price'], price['currency']),
'region': region.name,
})
return result
return {}
| washort/zamboni | mkt/prices/serializers.py | Python | bsd-3-clause | 1,136 |
import arcas
import pandas
def test_setup():
api = arcas.Springer()
assert api.standard == 'http://api.springer.com/metadata/pam?q='
def test_keys():
api = arcas.Springer()
assert api.keys() == ['url', 'key', 'unique_key', 'title', 'author', 'abstract',
'doi', 'date', 'journal', 'provenance', 'category', 'score',
'open_access']
def test_parameters_and_url_author():
api = arcas.Springer()
parameters = api.parameters_fix(author='Glynatsi')
assert parameters == ['name:Glynatsi']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=name:Glynatsi&api_key=Your key here'
def test_parameters_and_url_title():
api = arcas.Springer()
parameters = api.parameters_fix(title='Game')
assert parameters == ['title:Game']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=title:Game&api_key=Your key here'
def test_parameters_and_url_category():
api = arcas.Springer()
parameters = api.parameters_fix(category='game theory')
assert parameters == ['subject:game theory']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=subject:game theory&api_key=Your key here'
def test_parameters_and_url_journal():
api = arcas.Springer()
parameters = api.parameters_fix(journal='Springer')
assert parameters == ['pub:Springer']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=pub:Springer&api_key=Your key here'
def test_parameters_and_url_record():
api = arcas.Springer()
parameters = api.parameters_fix(records=1)
assert parameters == ['p=1']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=p=1&api_key=Your key here'
def test_parameters_and_url_start():
api = arcas.Springer()
parameters = api.parameters_fix(start=1)
assert parameters == ['s=1']
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=s=1&api_key=Your key here'
def test_create_url_search():
api = arcas.Springer()
parameters = api.parameters_fix(title='Nash', journal='Spinger', records=2, start=5)
url = api.create_url_search(parameters)
assert url == 'http://api.springer.com/metadata/pam?q=title:Nash+AND+pub:Spinger&p=2&s=5&api_key=Your key here'
def test_to_dataframe():
dummy_article = {'identifier': 'doi:10.1000/', 'title': 'Title',
'creator': 'E Glynatsi, V Knight', 'publicationName':
'Awesome Journal', 'genre': 'ReviewPaper', 'openAccess': 'false',
'h1': 'Abstract', 'p': 'Abstract',
'doi': '10.1000/', 'publisher': 'Springer',
                     'publicationDate': '2021-01-01', 'url': 'http://dx.doi.org/10.1000/'}
api = arcas.Springer()
article = api.to_dataframe(dummy_article)
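    # The citation key is derived from the first author's surname plus the
    # publication year ('Glynatsi' + '2021').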
assert isinstance(article, pandas.core.frame.DataFrame)
assert list(article.columns) == api.keys()
assert len(article['url']) == 2
assert article['url'].unique()[0] == 'http://dx.doi.org/10.1000/'
assert article['key'].unique()[0] == 'Glynatsi2021'
assert article['title'].unique()[0] == 'Title'
assert article['abstract'].unique()[0] == 'Abstract'
assert article['journal'].unique()[0] == 'Awesome Journal'
assert article['date'].unique()[0] == 2021
assert article['open_access'].unique()[0] == False
    assert article['score'].unique()[0] == 'Not available'
| ArcasProject/Arcas | tests/test_springer.py | Python | mit | 3,670 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "NSFOCUS Web Application Firewall (NSFOCUS)"
def detect(get_page):
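    # Probe with each attack vector and flag detection as soon as the
    # Server response header carries the NSFocus signature.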
retval = False
for vector in WAF_ATTACK_VECTORS:
_, headers, _ = get_page(get=vector)
retval = re.search(r"NSFocus", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
if retval:
break
return retval
| hackersql/sq1map | waf/nsfocus.py | Python | gpl-3.0 | 580 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Setup up tests for regression """
import unittest
import uuid
import datetime
import os
from ConfigParser import SafeConfigParser
from threading import Lock
import sparktk as stk
import config
import spark_context_config
udf_lib_path = os.path.join(config.root, "regression-tests",
"sparktkregtests", "lib", "udftestlib")
udf_files = [os.path.join(udf_lib_path, f) for f in os.listdir(udf_lib_path)]
lock = Lock()
global_tc = None
def get_context():
global global_tc
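    # Create the shared TkContext lazily; the lock guarantees only one
    # context is built even if tests request it concurrently.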
with lock:
if global_tc is None:
sparktkconf_dict = spark_context_config.get_spark_conf()
if config.run_mode:
global_tc = stk.TkContext(
master='yarn-client',
extra_conf_dict=sparktkconf_dict, py_files=udf_files)
else:
global_tc = stk.TkContext(py_files=udf_files)
return global_tc
class SparkTKTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Build the context for use"""
cls.context = get_context()
cls.context.sc.setCheckpointDir(config.checkpoint_dir)
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
pass
def get_file(self, identifier, performance_file=False):
"""Return the hdfs path to the given file"""
# Note this is an HDFS path, not a userspace path. os.path library
# may be wrong
if performance_file:
module_name = '.'.join(identifier.split('.')[-2:])
config_reader = SafeConfigParser()
filepath = os.path.abspath(os.path.join(
config.root, "regression-tests", "sparktkregtests",
"lib", "performance.ini"))
config_reader.read(filepath)
placed_path = (config.performance_data_dir + "/" +
config_reader.get(config.test_size, module_name))
else:
placed_path = config.hdfs_data_dir + "/" + identifier
return placed_path
def get_export_file(self, filename):
# Note this is an HDFS path, not a userspace path. os.path library
# may be wrong
placed_path = config.export_dir + "/" + filename
return placed_path
def get_name(self, prefix):
"""build a guid hardened unique name """
datestamp = datetime.datetime.now().strftime("%m_%d_%H_%M_")
name = prefix + datestamp + uuid.uuid1().hex
return name
def get_local_dataset(self, dataset):
"""gets the dataset from the dataset folder"""
dataset_directory = config.dataset_directory
return os.path.join(dataset_directory, dataset)
def assertFramesEqual(self, frame1, frame2):
frame1_take = frame1.take(frame1.count())
frame2_take = frame2.take(frame2.count())
self.assertItemsEqual(frame1_take, frame2_take)
| grehx/spark-tk | regression-tests/sparktkregtests/lib/sparktk_test.py | Python | apache-2.0 | 3,650 |
#!/usr/bin/env python
# Copyright 2012 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import cStringIO
import hashlib
import json
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
ROOT_DIR = unicode(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party'))
from depot_tools import auto_stub
from depot_tools import fix_encoding
import auth
import isolate
import isolate_format
import isolated_format
import isolateserver
from utils import file_path
from utils import logging_utils
from utils import tools
import test_utils
ALGO = hashlib.sha1
NO_RUN_ISOLATE = {
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything\n',
'tests/isolate/files1/test_file1.txt': 'Foo\n',
'tests/isolate/files1/test_file2.txt': 'Bar\n',
'tests/isolate/no_run.isolate':
"""{
# Includes itself.
'variables': {'files': ['no_run.isolate', 'files1/']},
}""",
}
SPLIT_ISOLATE = {
'tests/isolate/files1/subdir/42.txt':
'the answer to life the universe and everything',
'tests/isolate/split.isolate':
"""{
'variables': {
'command': ['python', 'split.py'],
'files': [
'<(DEPTH)/split.py',
'<(PRODUCT_DIR)/subdir/42.txt',
'test/data/foo.txt',
],
},
}""",
'tests/isolate/split.py': "import sys; sys.exit(1)",
'tests/isolate/test/data/foo.txt': 'Split',
}
TOUCH_ROOT_ISOLATE = {
'tests/isolate/touch_root.isolate':
"""{
'conditions': [
['(OS=="linux" and chromeos==1) or ((OS=="mac" or OS=="win") and '
'chromeos==0)', {
'variables': {
'command': ['python', 'touch_root.py'],
'files': ['../../at_root', 'touch_root.py'],
},
}],
],
}""",
'tests/isolate/touch_root.py':
"def main():\n"
" import os, sys\n"
" print('child_touch_root: Verify the relative directories')\n"
" root_dir = os.path.dirname(os.path.abspath(__file__))\n"
" parent_dir, base = os.path.split(root_dir)\n"
" parent_dir, base2 = os.path.split(parent_dir)\n"
" if base != 'isolate' or base2 != 'tests':\n"
" print 'Invalid root dir %s' % root_dir\n"
" return 4\n"
" content = open(os.path.join(parent_dir, 'at_root'), 'r').read()\n"
" return int(content != 'foo')\n"
"\n"
"if __name__ == '__main__':\n"
" sys.exit(main())\n",
'at_root': 'foo',
}
class IsolateBase(auto_stub.TestCase):
def setUp(self):
super(IsolateBase, self).setUp()
self.mock(auth, 'ensure_logged_in', lambda _: None)
self.old_cwd = os.getcwd()
self.cwd = file_path.get_native_path_case(
unicode(tempfile.mkdtemp(prefix=u'isolate_')))
# Everything should work even from another directory.
os.chdir(self.cwd)
self.mock(
logging_utils.OptionParserWithLogging, 'logger_root',
logging.Logger('unittest'))
def tearDown(self):
try:
os.chdir(self.old_cwd)
file_path.rmtree(self.cwd)
finally:
super(IsolateBase, self).tearDown()
class IsolateTest(IsolateBase):
def test_savedstate_load_minimal(self):
# The file referenced by 'isolate_file' must exist even if its content is
# not read.
open(os.path.join(self.cwd, 'fake.isolate'), 'wb').close()
values = {
'OS': sys.platform,
'algo': 'sha-1',
'isolate_file': 'fake.isolate',
}
expected = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'config_variables': {},
'command': [],
'extra_variables': {},
'files': {},
'isolate_file': 'fake.isolate',
'path_variables': {},
'version': isolate.SavedState.EXPECTED_VERSION,
}
saved_state = isolate.SavedState.load(values, self.cwd)
self.assertEqual(expected, saved_state.flatten())
def test_savedstate_load(self):
# The file referenced by 'isolate_file' must exist even if its content is
# not read.
open(os.path.join(self.cwd, 'fake.isolate'), 'wb').close()
values = {
'OS': sys.platform,
'algo': 'sha-1',
'config_variables': {},
'extra_variables': {
'foo': 42,
},
'isolate_file': 'fake.isolate',
}
expected = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': [],
'config_variables': {},
'extra_variables': {
'foo': 42,
},
'files': {},
'isolate_file': 'fake.isolate',
'path_variables': {},
'version': isolate.SavedState.EXPECTED_VERSION,
}
saved_state = isolate.SavedState.load(values, self.cwd)
self.assertEqual(expected, saved_state.flatten())
def test_variable_arg(self):
parser = optparse.OptionParser()
isolate.add_isolate_options(parser)
options, args = parser.parse_args(
['--config-variable', 'Foo', 'bar',
'--path-variable', 'Baz=sub=string',
'--extra-variable', 'biz', 'b uz=a'])
isolate.process_isolate_options(parser, options, require_isolated=False)
expected_path = {
'Baz': 'sub=string',
}
expected_config = {
'Foo': 'bar',
}
expected_extra = {
'biz': 'b uz=a',
'EXECUTABLE_SUFFIX': '.exe' if sys.platform == 'win32' else '',
}
self.assertEqual(expected_path, options.path_variables)
self.assertEqual(expected_config, options.config_variables)
self.assertEqual(expected_extra, options.extra_variables)
self.assertEqual([], args)
def test_variable_arg_fail(self):
parser = optparse.OptionParser()
isolate.add_isolate_options(parser)
self.mock(sys, 'stderr', cStringIO.StringIO())
with self.assertRaises(SystemExit):
parser.parse_args(['--config-variable', 'Foo'])
def test_blacklist_default(self):
ok = [
'.git2',
'.pyc',
'.swp',
'allo.git',
'foo',
]
blocked = [
'.git',
os.path.join('foo', '.git'),
'foo.pyc',
'bar.swp',
]
blacklist = tools.gen_blacklist(isolateserver.DEFAULT_BLACKLIST)
for i in ok:
self.assertFalse(blacklist(i), i)
for i in blocked:
self.assertTrue(blacklist(i), i)
def test_blacklist_custom(self):
ok = [
'.run_test_cases',
'testserver.log2',
]
blocked = [
'foo.run_test_cases',
'testserver.log',
os.path.join('foo', 'testserver.log'),
]
blacklist = tools.gen_blacklist([r'^.+\.run_test_cases$', r'^.+\.log$'])
for i in ok:
self.assertFalse(blacklist(i), i)
for i in blocked:
self.assertTrue(blacklist(i), i)
def test_read_only(self):
isolate_file = os.path.join(self.cwd, 'fake.isolate')
isolate_content = {
'variables': {
'read_only': 0,
},
}
tools.write_json(isolate_file, isolate_content, False)
expected = {
'algo': 'sha-1',
'files': {},
'read_only': 0,
'relative_cwd': '.',
'version': isolated_format.ISOLATED_FILE_VERSION,
}
complete_state = isolate.CompleteState(None, isolate.SavedState(self.cwd))
complete_state.load_isolate(
unicode(self.cwd), unicode(isolate_file), {}, {}, {}, None, False)
self.assertEqual(expected, complete_state.saved_state.to_isolated())
class IsolateLoad(IsolateBase):
def setUp(self):
super(IsolateLoad, self).setUp()
self.directory = tempfile.mkdtemp(prefix=u'isolate_')
self.isolate_dir = os.path.join(self.directory, u'isolate')
self.isolated_dir = os.path.join(self.directory, u'isolated')
os.mkdir(self.isolated_dir, 0700)
def tearDown(self):
try:
file_path.rmtree(self.directory)
finally:
super(IsolateLoad, self).tearDown()
def _get_option(self, *isolatepath):
isolate_file = os.path.join(self.isolate_dir, *isolatepath)
class Options(object):
isolated = os.path.join(self.isolated_dir, 'foo.isolated')
outdir = os.path.join(self.directory, 'outdir')
isolate = isolate_file
blacklist = list(isolateserver.DEFAULT_BLACKLIST)
path_variables = {}
config_variables = {
'OS': 'linux',
'chromeos': 1,
}
extra_variables = {'foo': 'bar'}
ignore_broken_items = False
return Options()
def _cleanup_isolated(self, expected_isolated):
"""Modifies isolated to remove the non-deterministic parts."""
if sys.platform == 'win32':
      # 'm' is not saved on Windows.
for values in expected_isolated['files'].itervalues():
self.assertTrue(values.pop('m'))
def _cleanup_saved_state(self, actual_saved_state):
for item in actual_saved_state['files'].itervalues():
self.assertTrue(item.pop('t'))
def make_tree(self, contents):
test_utils.make_tree(self.isolate_dir, contents)
def size(self, *args):
return os.stat(os.path.join(self.isolate_dir, *args)).st_size
def hash_file(self, *args):
p = os.path.join(*args)
if not os.path.isabs(p):
p = os.path.join(self.isolate_dir, p)
return isolated_format.hash_file(p, ALGO)
def test_load_stale_isolated(self):
# Data to be loaded in the .isolated file. Do not create a .state file.
self.make_tree(TOUCH_ROOT_ISOLATE)
input_data = {
'command': ['python'],
'files': {
'foo': {
"m": 0640,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
os.path.join('tests', 'isolate', 'touch_root.py'): {
"m": 0750,
"h": "invalid",
"s": 538,
"t": 1335146921,
},
},
}
options = self._get_option('tests', 'isolate', 'touch_root.isolate')
tools.write_json(options.isolated, input_data, False)
# A CompleteState object contains two parts:
# - Result instance stored in complete_state.isolated, corresponding to the
# .isolated file, is what is read by run_test_from_archive.py.
    # - SavedState instance stored in complete_state.saved_state,
# corresponding to the .state file, which is simply to aid the developer
# when re-running the same command multiple times and contain
# discardable information.
complete_state = isolate.load_complete_state(options, self.cwd, None, False)
actual_isolated = complete_state.saved_state.to_isolated()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'algo': 'sha-1',
'command': ['python', 'touch_root.py'],
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
u'at_root': {
'm': 0600,
'h': self.hash_file('at_root'),
's': self.size('at_root'),
},
},
'read_only': 1,
'relative_cwd': os.path.join(u'tests', 'isolate'),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self._cleanup_isolated(expected_isolated)
self.assertEqual(expected_isolated, actual_isolated)
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'touch_root.isolate')
expected_saved_state = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': ['python', 'touch_root.py'],
'config_variables': {
'OS': 'linux',
'chromeos': options.config_variables['chromeos'],
},
'extra_variables': {
'foo': 'bar',
},
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
u'at_root': {
'm': 0600,
'h': self.hash_file('at_root'),
's': self.size('at_root'),
},
},
'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
os.path.dirname(options.isolated)),
'path_variables': {},
'relative_cwd': os.path.join(u'tests', 'isolate'),
'root_dir': file_path.get_native_path_case(self.isolate_dir),
'version': isolate.SavedState.EXPECTED_VERSION,
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir(self):
# The resulting .isolated file will be missing ../../at_root. It is
# because this file is outside the --subdir parameter.
self.make_tree(TOUCH_ROOT_ISOLATE)
options = self._get_option('tests', 'isolate', 'touch_root.isolate')
complete_state = isolate.load_complete_state(
options, self.cwd, os.path.join('tests', 'isolate'), False)
actual_isolated = complete_state.saved_state.to_isolated()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'algo': 'sha-1',
'command': ['python', 'touch_root.py'],
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'read_only': 1,
'relative_cwd': os.path.join(u'tests', 'isolate'),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self._cleanup_isolated(expected_isolated)
self.assertEqual(expected_isolated, actual_isolated)
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'touch_root.isolate')
expected_saved_state = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': ['python', 'touch_root.py'],
'config_variables': {
'OS': 'linux',
'chromeos': 1,
},
'extra_variables': {
'foo': 'bar',
},
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
os.path.dirname(options.isolated)),
'path_variables': {},
'relative_cwd': os.path.join(u'tests', 'isolate'),
'root_dir': file_path.get_native_path_case(self.isolate_dir),
'version': isolate.SavedState.EXPECTED_VERSION,
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
def test_subdir_variable(self):
    # The resulting .isolated file will be missing ../../at_root. It is
# because this file is outside the --subdir parameter.
self.make_tree(TOUCH_ROOT_ISOLATE)
options = self._get_option('tests', 'isolate', 'touch_root.isolate')
# Path variables are keyed on the directory containing the .isolate file.
options.path_variables['TEST_ISOLATE'] = '.'
# Note that options.isolated is in self.directory, which is a temporary
# directory.
complete_state = isolate.load_complete_state(
options, os.path.join(self.isolate_dir, 'tests', 'isolate'),
'<(TEST_ISOLATE)', False)
actual_isolated = complete_state.saved_state.to_isolated()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'algo': 'sha-1',
'command': ['python', 'touch_root.py'],
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'read_only': 1,
'relative_cwd': os.path.join(u'tests', 'isolate'),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self._cleanup_isolated(expected_isolated)
self.assertEqual(expected_isolated, actual_isolated)
# It is important to note:
# - the root directory is self.isolate_dir.
# - relative_cwd is tests/isolate.
# - TEST_ISOLATE is based of relative_cwd, so it represents tests/isolate.
# - anything outside TEST_ISOLATE was not included in the 'files' section.
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'touch_root.isolate')
expected_saved_state = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': ['python', 'touch_root.py'],
'config_variables': {
'OS': 'linux',
'chromeos': 1,
},
'extra_variables': {
'foo': 'bar',
},
'files': {
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
os.path.dirname(options.isolated)),
'path_variables': {
'TEST_ISOLATE': '.',
},
'relative_cwd': os.path.join(u'tests', 'isolate'),
'root_dir': file_path.get_native_path_case(self.isolate_dir),
'version': isolate.SavedState.EXPECTED_VERSION,
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
def test_variable_not_exist(self):
self.make_tree(TOUCH_ROOT_ISOLATE)
options = self._get_option('tests', 'isolate', 'touch_root.isolate')
options.path_variables['PRODUCT_DIR'] = os.path.join(u'tests', u'isolate')
native_cwd = file_path.get_native_path_case(unicode(self.cwd))
try:
isolate.load_complete_state(options, self.cwd, None, False)
self.fail()
except isolate.ExecutionError, e:
self.assertEqual(
'PRODUCT_DIR=%s is not a directory' %
os.path.join(native_cwd, 'tests', 'isolate'),
e.args[0])
def test_variable(self):
self.make_tree(TOUCH_ROOT_ISOLATE)
options = self._get_option('tests', 'isolate', 'touch_root.isolate')
options.path_variables['PRODUCT_DIR'] = os.path.join('tests', 'isolate')
complete_state = isolate.load_complete_state(
options, self.isolate_dir, None, False)
actual_isolated = complete_state.saved_state.to_isolated()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'algo': 'sha-1',
'command': ['python', 'touch_root.py'],
'files': {
u'at_root': {
'm': 0600,
'h': self.hash_file('at_root'),
's': self.size('at_root'),
},
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'read_only': 1,
'relative_cwd': os.path.join(u'tests', 'isolate'),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self._cleanup_isolated(expected_isolated)
self.assertEqual(expected_isolated, actual_isolated)
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'touch_root.isolate')
expected_saved_state = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': ['python', 'touch_root.py'],
'config_variables': {
'OS': 'linux',
'chromeos': 1,
},
'extra_variables': {
'foo': 'bar',
},
'files': {
u'at_root': {
'm': 0600,
'h': self.hash_file('at_root'),
's': self.size('at_root'),
},
os.path.join(u'tests', 'isolate', 'touch_root.py'): {
'm': 0700,
'h': self.hash_file('tests', 'isolate', 'touch_root.py'),
's': self.size('tests', 'isolate', 'touch_root.py'),
},
},
'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
os.path.dirname(options.isolated)),
'path_variables': {
'PRODUCT_DIR': '.',
},
'relative_cwd': os.path.join(u'tests', 'isolate'),
'root_dir': file_path.get_native_path_case(self.isolate_dir),
'version': isolate.SavedState.EXPECTED_VERSION,
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
self.assertEqual([], os.listdir(self.isolated_dir))
def test_root_dir_because_of_variable(self):
# Ensures that load_isolate() works even when path variables have deep root
# dirs. The end result is similar to touch_root.isolate, except that
# no_run.isolate doesn't reference '..' at all.
#
# A real world example would be PRODUCT_DIR=../../out/Release but nothing in
# this directory is mapped.
#
# Imagine base/base_unittests.isolate would not map anything in
# PRODUCT_DIR. In that case, the automatically determined root dir is
# src/base, since nothing outside this directory is mapped.
self.make_tree(NO_RUN_ISOLATE)
options = self._get_option('tests', 'isolate', 'no_run.isolate')
# Any directory outside <self.isolate_dir>/tests/isolate.
options.path_variables['PRODUCT_DIR'] = 'third_party'
os.mkdir(os.path.join(self.isolate_dir, 'third_party'), 0700)
complete_state = isolate.load_complete_state(
options, self.isolate_dir, None, False)
actual_isolated = complete_state.saved_state.to_isolated()
actual_saved_state = complete_state.saved_state.flatten()
expected_isolated = {
'algo': 'sha-1',
'files': {
os.path.join(u'tests', 'isolate', 'files1', 'subdir', '42.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'subdir', '42.txt'),
's': self.size('tests', 'isolate', 'files1', 'subdir', '42.txt'),
},
os.path.join(u'tests', 'isolate', 'files1', 'test_file1.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'test_file1.txt'),
's': self.size('tests', 'isolate', 'files1', 'test_file1.txt'),
},
os.path.join(u'tests', 'isolate', 'files1', 'test_file2.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'test_file2.txt'),
's': self.size('tests', 'isolate', 'files1', 'test_file2.txt'),
},
os.path.join(u'tests', 'isolate', 'no_run.isolate'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'no_run.isolate'),
's': self.size('tests', 'isolate', 'no_run.isolate'),
},
},
'read_only': 1,
'relative_cwd': os.path.join(u'tests', 'isolate'),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self._cleanup_isolated(expected_isolated)
self.assertEqual(expected_isolated, actual_isolated)
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'no_run.isolate')
expected_saved_state = {
'OS': sys.platform,
'algo': 'sha-1',
'child_isolated_files': [],
'command': [],
'config_variables': {
'OS': 'linux',
'chromeos': 1,
},
'extra_variables': {
'foo': 'bar',
},
'files': {
os.path.join(u'tests', 'isolate', 'files1', 'subdir', '42.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'subdir', '42.txt'),
's': self.size('tests', 'isolate', 'files1', 'subdir', '42.txt'),
},
os.path.join(u'tests', 'isolate', 'files1', 'test_file1.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'test_file1.txt'),
's': self.size('tests', 'isolate', 'files1', 'test_file1.txt'),
},
os.path.join(u'tests', 'isolate', 'files1', 'test_file2.txt'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'files1', 'test_file2.txt'),
's': self.size('tests', 'isolate', 'files1', 'test_file2.txt'),
},
os.path.join(u'tests', 'isolate', 'no_run.isolate'): {
'm': 0600,
'h': self.hash_file('tests', 'isolate', 'no_run.isolate'),
's': self.size('tests', 'isolate', 'no_run.isolate'),
},
},
'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
os.path.dirname(options.isolated)),
'path_variables': {
'PRODUCT_DIR': os.path.join(u'..', '..', 'third_party'),
},
'relative_cwd': os.path.join(u'tests', 'isolate'),
'root_dir': file_path.get_native_path_case(self.isolate_dir),
'version': isolate.SavedState.EXPECTED_VERSION,
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
self.assertEqual([], os.listdir(self.isolated_dir))
def test_chromium_split(self):
# Create an .isolate file and a tree of random stuff.
self.make_tree(SPLIT_ISOLATE)
options = self._get_option('tests', 'isolate', 'split.isolate')
options.path_variables = {
'DEPTH': '.',
'PRODUCT_DIR': os.path.join('files1'),
}
options.config_variables = {
'OS': 'linux',
}
complete_state = isolate.load_complete_state(
options, os.path.join(self.isolate_dir, 'tests', 'isolate'), None,
False)
# By saving the files, it forces splitting the data up.
complete_state.save_files()
actual_isolated_master = tools.read_json(
os.path.join(self.isolated_dir, 'foo.isolated'))
expected_isolated_master = {
u'algo': u'sha-1',
u'command': [u'python', u'split.py'],
u'files': {
u'split.py': {
u'm': 0700,
u'h': unicode(self.hash_file('tests', 'isolate', 'split.py')),
u's': self.size('tests', 'isolate', 'split.py'),
},
},
u'includes': [
unicode(self.hash_file(self.isolated_dir, 'foo.0.isolated')),
unicode(self.hash_file(self.isolated_dir, 'foo.1.isolated')),
],
u'read_only': 1,
u'relative_cwd': u'.',
u'version': unicode(isolated_format.ISOLATED_FILE_VERSION),
}
self._cleanup_isolated(expected_isolated_master)
self.assertEqual(expected_isolated_master, actual_isolated_master)
actual_isolated_0 = tools.read_json(
os.path.join(self.isolated_dir, 'foo.0.isolated'))
expected_isolated_0 = {
u'algo': u'sha-1',
u'files': {
os.path.join(u'test', 'data', 'foo.txt'): {
u'm': 0600,
u'h': unicode(
self.hash_file('tests', 'isolate', 'test', 'data', 'foo.txt')),
u's': self.size('tests', 'isolate', 'test', 'data', 'foo.txt'),
},
},
u'version': unicode(isolated_format.ISOLATED_FILE_VERSION),
}
self._cleanup_isolated(expected_isolated_0)
self.assertEqual(expected_isolated_0, actual_isolated_0)
actual_isolated_1 = tools.read_json(
os.path.join(self.isolated_dir, 'foo.1.isolated'))
expected_isolated_1 = {
u'algo': u'sha-1',
u'files': {
os.path.join(u'files1', 'subdir', '42.txt'): {
u'm': 0600,
u'h': unicode(
self.hash_file('tests', 'isolate', 'files1', 'subdir', '42.txt')),
u's': self.size('tests', 'isolate', 'files1', 'subdir', '42.txt'),
},
},
u'version': unicode(isolated_format.ISOLATED_FILE_VERSION),
}
self._cleanup_isolated(expected_isolated_1)
self.assertEqual(expected_isolated_1, actual_isolated_1)
actual_saved_state = tools.read_json(
isolate.isolatedfile_to_state(options.isolated))
isolated_base = unicode(os.path.basename(options.isolated))
isolate_file = os.path.join(
self.isolate_dir, 'tests', 'isolate', 'split.isolate')
expected_saved_state = {
u'OS': unicode(sys.platform),
u'algo': u'sha-1',
u'child_isolated_files': [
isolated_base[:-len('.isolated')] + '.0.isolated',
isolated_base[:-len('.isolated')] + '.1.isolated',
],
u'command': [u'python', u'split.py'],
u'config_variables': {
u'OS': u'linux',
},
u'extra_variables': {
u'foo': u'bar',
},
u'files': {
os.path.join(u'files1', 'subdir', '42.txt'): {
u'm': 0600,
u'h': unicode(
self.hash_file('tests', 'isolate', 'files1', 'subdir', '42.txt')),
u's': self.size('tests', 'isolate', 'files1', 'subdir', '42.txt'),
},
u'split.py': {
u'm': 0700,
u'h': unicode(self.hash_file('tests', 'isolate', 'split.py')),
u's': self.size('tests', 'isolate', 'split.py'),
},
os.path.join(u'test', 'data', 'foo.txt'): {
u'm': 0600,
u'h': unicode(
self.hash_file('tests', 'isolate', 'test', 'data', 'foo.txt')),
u's': self.size('tests', 'isolate', 'test', 'data', 'foo.txt'),
},
},
u'isolate_file': file_path.safe_relpath(
file_path.get_native_path_case(isolate_file),
unicode(os.path.dirname(options.isolated))),
u'path_variables': {
u'DEPTH': u'.',
u'PRODUCT_DIR': u'files1',
},
u'relative_cwd': u'.',
u'root_dir': file_path.get_native_path_case(
os.path.dirname(isolate_file)),
u'version': unicode(isolate.SavedState.EXPECTED_VERSION),
}
self._cleanup_isolated(expected_saved_state)
self._cleanup_saved_state(actual_saved_state)
self.assertEqual(expected_saved_state, actual_saved_state)
self.assertEqual(
[
'foo.0.isolated', 'foo.1.isolated',
'foo.isolated', 'foo.isolated.state',
],
sorted(os.listdir(self.isolated_dir)))
def test_load_isolate_include_command(self):
# Ensure that using a .isolate that includes another one in a different
# directory will lead to the proper relative directory. See
# test_load_with_includes_with_commands in isolate_format_test.py as
# reference.
# Exactly the same thing as in isolate_format_test.py
isolate1 = {
'conditions': [
['OS=="amiga" or OS=="win"', {
'variables': {
'command': [
'foo', 'amiga_or_win',
],
},
}],
['OS=="linux"', {
'variables': {
'command': [
'foo', 'linux',
],
'files': [
'file_linux',
],
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'file_non_linux',
],
},
}],
],
}
isolate2 = {
'conditions': [
['OS=="linux" or OS=="mac"', {
'variables': {
'command': [
'foo', 'linux_or_mac',
],
'files': [
'other/file',
],
},
}],
],
}
    # Do not define command in isolate3, otherwise commands in the other
    # included .isolate files will be ignored.
isolate3 = {
'includes': [
'../1/isolate1.isolate',
'2/isolate2.isolate',
],
'conditions': [
['OS=="amiga"', {
'variables': {
'files': [
'file_amiga',
],
},
}],
['OS=="mac"', {
'variables': {
'files': [
'file_mac',
],
},
}],
],
}
def test_with_os(
config_os, files_to_create, expected_files, command, relative_cwd):
"""Creates a tree of files in a subdirectory for testing and test this
set of conditions.
"""
directory = os.path.join(unicode(self.directory), config_os)
os.mkdir(directory)
isolate_dir = os.path.join(directory, u'isolate')
isolate_dir_1 = os.path.join(isolate_dir, u'1')
isolate_dir_3 = os.path.join(isolate_dir, u'3')
isolate_dir_3_2 = os.path.join(isolate_dir_3, u'2')
isolated_dir = os.path.join(directory, u'isolated')
os.mkdir(isolated_dir)
os.mkdir(isolate_dir)
os.mkdir(isolate_dir_1)
os.mkdir(isolate_dir_3)
os.mkdir(isolate_dir_3_2)
isolated = os.path.join(isolated_dir, u'foo.isolated')
with open(os.path.join(isolate_dir_1, 'isolate1.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate1, f)
with open(os.path.join(isolate_dir_3_2, 'isolate2.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate2, f)
root_isolate = os.path.join(isolate_dir_3, 'isolate3.isolate')
with open(root_isolate, 'wb') as f:
isolate_format.pretty_print(isolate3, f)
# Make all the touched files.
mapping = {1: isolate_dir_1, 2: isolate_dir_3_2, 3: isolate_dir_3}
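      # Keys 1-3 select the directory of the corresponding .isolate file:
      # isolate/1, isolate/3/2 and isolate/3 respectively.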
for k, v in files_to_create.iteritems():
f = os.path.join(mapping[k], v)
base = os.path.dirname(f)
if not os.path.isdir(base):
os.mkdir(base)
open(f, 'wb').close()
c = isolate.CompleteState(isolated, isolate.SavedState(isolated_dir))
config = {
'OS': config_os,
}
c.load_isolate(
unicode(self.cwd), root_isolate, {}, config, {}, None, False)
# Note that load_isolate() doesn't retrieve the meta data about each file.
expected = {
'algo': 'sha-1',
'command': command,
'files': {
unicode(f.replace('/', os.path.sep)):{} for f in expected_files
},
'read_only': 1,
'relative_cwd': relative_cwd.replace('/', os.path.sep),
'version': isolated_format.ISOLATED_FILE_VERSION,
}
self.assertEqual(expected, c.saved_state.to_isolated())
# root is .../isolate/.
test_with_os(
'amiga',
{
3: 'file_amiga',
},
(
u'3/file_amiga',
),
['foo', 'amiga_or_win'],
'1')
# root is .../isolate/.
test_with_os(
'linux',
{
1: 'file_linux',
2: 'other/file',
},
(
u'1/file_linux',
u'3/2/other/file',
),
['foo', 'linux_or_mac'],
'3/2')
# root is .../isolate/.
test_with_os(
'mac',
{
1: 'file_non_linux',
2: 'other/file',
3: 'file_mac',
},
(
u'1/file_non_linux',
u'3/2/other/file',
u'3/file_mac',
),
['foo', 'linux_or_mac'],
'3/2')
# root is .../isolate/1/.
test_with_os(
'win',
{
1: 'file_non_linux',
},
(
u'file_non_linux',
),
['foo', 'amiga_or_win'],
'.')
def test_load_isolate_include_command_and_variables(self):
# Ensure that using a .isolate that includes another one in a different
# directory will lead to the proper relative directory when using variables.
# See test_load_with_includes_with_commands_and_variables in
# isolate_format_test.py as reference.
#
# With path variables, 'cwd' is used instead of the path to the .isolate
    # file, so the files have to be specified relative to the cwd. While this
    # may seem surprising, it is what makes the whole thing work in the first
    # place.
# Almost exactly the same thing as in isolate_format_test.py plus the EXTRA
# for better testing with variable replacement.
isolate1 = {
'conditions': [
['OS=="amiga" or OS=="win"', {
'variables': {
'command': [
'foo', 'amiga_or_win', '<(PATH)', '<(EXTRA)',
],
},
}],
['OS=="linux"', {
'variables': {
'command': [
'foo', 'linux', '<(PATH)', '<(EXTRA)',
],
'files': [
'<(PATH)/file_linux',
],
},
}],
['OS=="mac" or OS=="win"', {
'variables': {
'files': [
'<(PATH)/file_non_linux',
],
},
}],
],
}
isolate2 = {
'conditions': [
['OS=="linux" or OS=="mac"', {
'variables': {
'command': [
'foo', 'linux_or_mac', '<(PATH)', '<(EXTRA)',
],
'files': [
'<(PATH)/other/file',
],
},
}],
],
}
isolate3 = {
'includes': [
'../1/isolate1.isolate',
'2/isolate2.isolate',
],
'conditions': [
['OS=="amiga"', {
'variables': {
'files': [
'<(PATH)/file_amiga',
],
},
}],
['OS=="mac"', {
'variables': {
'command': [
'foo', 'mac', '<(PATH)', '<(EXTRA)',
],
'files': [
'<(PATH)/file_mac',
],
},
}],
],
}
def test_with_os(config_os, expected_files, command, relative_cwd):
"""Creates a tree of files in a subdirectory for testing and test this
set of conditions.
"""
directory = os.path.join(unicode(self.directory), config_os)
os.mkdir(directory)
cwd = os.path.join(unicode(self.cwd), config_os)
os.mkdir(cwd)
isolate_dir = os.path.join(directory, u'isolate')
isolate_dir_1 = os.path.join(isolate_dir, u'1')
isolate_dir_3 = os.path.join(isolate_dir, u'3')
isolate_dir_3_2 = os.path.join(isolate_dir_3, u'2')
isolated_dir = os.path.join(directory, u'isolated')
os.mkdir(isolated_dir)
os.mkdir(isolate_dir)
os.mkdir(isolate_dir_1)
os.mkdir(isolate_dir_3)
os.mkdir(isolate_dir_3_2)
isolated = os.path.join(isolated_dir, u'foo.isolated')
with open(os.path.join(isolate_dir_1, 'isolate1.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate1, f)
with open(os.path.join(isolate_dir_3_2, 'isolate2.isolate'), 'wb') as f:
isolate_format.pretty_print(isolate2, f)
root_isolate = os.path.join(isolate_dir_3, 'isolate3.isolate')
with open(root_isolate, 'wb') as f:
isolate_format.pretty_print(isolate3, f)
# Make all the touched files.
path_dir = os.path.join(cwd, 'path')
os.mkdir(path_dir)
for v in expected_files:
f = os.path.join(path_dir, v)
base = os.path.dirname(f)
if not os.path.isdir(base):
os.makedirs(base)
logging.warn(f)
open(f, 'wb').close()
c = isolate.CompleteState(isolated, isolate.SavedState(isolated_dir))
config = {
'OS': config_os,
}
paths = {
'PATH': 'path/',
}
extra = {
'EXTRA': 'indeed',
}
c.load_isolate(
unicode(cwd), root_isolate, paths, config, extra, None, False)
# Note that load_isolate() doesn't retrieve the meta data about each file.
expected = {
'algo': 'sha-1',
'command': command,
'files': {
unicode(os.path.join(cwd_name, config_os, 'path', f)): {}
for f in expected_files
},
'read_only': 1,
'relative_cwd': relative_cwd,
'version': isolated_format.ISOLATED_FILE_VERSION,
}
if not command:
expected.pop('command')
self.assertEqual(expected, c.saved_state.to_isolated())
cwd_name = os.path.basename(self.cwd)
dir_name = os.path.basename(self.directory)
test_with_os(
'amiga',
(
'file_amiga',
),
[],
os.path.join(dir_name, u'amiga', 'isolate', '3'))
test_with_os(
'linux',
(
u'file_linux',
os.path.join(u'other', 'file'),
),
[],
os.path.join(dir_name, u'linux', 'isolate', '3', '2'))
test_with_os(
'mac',
(
'file_non_linux',
os.path.join(u'other', 'file'),
'file_mac',
),
[
'foo',
'mac',
os.path.join(u'..', '..', '..', '..', cwd_name, 'mac', 'path'),
'indeed',
],
os.path.join(dir_name, u'mac', 'isolate', '3'))
test_with_os(
'win',
(
'file_non_linux',
),
[],
os.path.join(dir_name, u'win', 'isolate', '1'))
class IsolateCommand(IsolateBase):
def load_complete_state(self, *_):
"""Creates a minimalist CompleteState instance without an .isolated
reference.
"""
out = isolate.CompleteState(None, isolate.SavedState(self.cwd))
out.saved_state.isolate_file = u'blah.isolate'
out.saved_state.relative_cwd = u''
out.saved_state.root_dir = ROOT_DIR
return out
def test_CMDarchive(self):
actual = []
def mocked_upload_tree(base_url, infiles, namespace):
      # |infiles| may be a generator of pairs; materialize it into a dict.
actual.append({
'base_url': base_url,
'infiles': dict(infiles),
'namespace': namespace,
})
self.mock(isolateserver, 'upload_tree', mocked_upload_tree)
def join(*path):
return os.path.join(self.cwd, *path)
isolate_file = join('x.isolate')
isolated_file = join('x.isolated')
with open(isolate_file, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'files\': [\'foo\'],'
' },'
' }],'
' ],'
'}')
with open(join('foo'), 'wb') as f:
f.write('fooo')
self.mock(sys, 'stdout', cStringIO.StringIO())
cmd = [
'-i', isolate_file,
'-s', isolated_file,
'--isolate-server', 'http://localhost:1',
'--config-variable', 'OS', 'dendy',
]
self.assertEqual(0, isolate.CMDarchive(optparse.OptionParser(), cmd))
expected = [
{
'base_url': 'http://localhost:1',
'infiles': {
join(isolated_file): {
'priority': '0',
},
join('foo'): {
'h': '520d41b29f891bbaccf31d9fcfa72e82ea20fcf0',
's': 4,
},
},
'namespace': 'default-gzip',
},
]
# These always change.
actual[0]['infiles'][join(isolated_file)].pop('h')
actual[0]['infiles'][join(isolated_file)].pop('s')
# 'm' is not set on Windows.
actual[0]['infiles'][join('foo')].pop('m', None)
actual[0]['infiles'][join('foo')].pop('t')
self.assertEqual(expected, actual)
def test_CMDbatcharchive(self):
# Same as test_CMDarchive but via code path that parses *.gen.json files.
actual = []
def mocked_upload_tree(base_url, infiles, namespace):
      # |infiles| may be a generator of pairs; materialize it into a dict.
actual.append({
'base_url': base_url,
'infiles': dict(infiles),
'namespace': namespace,
})
self.mock(isolateserver, 'upload_tree', mocked_upload_tree)
def join(*path):
return os.path.join(self.cwd, *path)
# First isolate: x.isolate.
isolate_file_x = join('x.isolate')
isolated_file_x = join('x.isolated')
with open(isolate_file_x, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'files\': [\'foo\'],'
' },'
' }],'
' ],'
'}')
with open(join('foo'), 'wb') as f:
f.write('fooo')
with open(join('x.isolated.gen.json'), 'wb') as f:
json.dump({
'args': [
'-i', isolate_file_x,
'-s', isolated_file_x,
'--config-variable', 'OS', 'dendy',
],
'dir': self.cwd,
'version': 1,
}, f)
# Second isolate: y.isolate.
isolate_file_y = join('y.isolate')
isolated_file_y = join('y.isolated')
with open(isolate_file_y, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'files\': [\'bar\'],'
' },'
' }],'
' ],'
'}')
with open(join('bar'), 'wb') as f:
f.write('barr')
with open(join('y.isolated.gen.json'), 'wb') as f:
json.dump({
'args': [
'-i', isolate_file_y,
'-s', isolated_file_y,
'--config-variable', 'OS', 'dendy',
],
'dir': self.cwd,
'version': 1,
}, f)
self.mock(sys, 'stdout', cStringIO.StringIO())
cmd = [
'--isolate-server', 'http://localhost:1',
'--dump-json', 'json_output.json',
join('x.isolated.gen.json'),
join('y.isolated.gen.json'),
]
self.assertEqual(
0,
isolate.CMDbatcharchive(logging_utils.OptionParserWithLogging(), cmd))
expected = [
{
'base_url': 'http://localhost:1',
'infiles': {
join(isolated_file_x): {
'priority': '0',
},
join('foo'): {
'h': '520d41b29f891bbaccf31d9fcfa72e82ea20fcf0',
's': 4,
},
join(isolated_file_y): {
'priority': '0',
},
join('bar'): {
'h': 'e918b3a3f9597e3cfdc62ce20ecf5756191cb3ec',
's': 4,
},
},
'namespace': 'default-gzip',
},
]
# These always change.
actual[0]['infiles'][join(isolated_file_x)].pop('h')
actual[0]['infiles'][join(isolated_file_x)].pop('s')
actual[0]['infiles'][join('foo')].pop('m', None)
actual[0]['infiles'][join('foo')].pop('t')
actual[0]['infiles'][join(isolated_file_y)].pop('h')
actual[0]['infiles'][join(isolated_file_y)].pop('s')
actual[0]['infiles'][join('bar')].pop('m', None)
actual[0]['infiles'][join('bar')].pop('t')
self.assertEqual(expected, actual)
expected_json = {
'x': isolated_format.hash_file(
os.path.join(self.cwd, 'x.isolated'), ALGO),
'y': isolated_format.hash_file(
os.path.join(self.cwd, 'y.isolated'), ALGO),
}
self.assertEqual(expected_json, tools.read_json('json_output.json'))
def test_CMDcheck_empty(self):
isolate_file = os.path.join(self.cwd, 'x.isolate')
isolated_file = os.path.join(self.cwd, 'x.isolated')
with open(isolate_file, 'wb') as f:
f.write('# Foo\n{\n}')
self.mock(sys, 'stdout', cStringIO.StringIO())
cmd = ['-i', isolate_file, '-s', isolated_file]
isolate.CMDcheck(optparse.OptionParser(), cmd)
def test_CMDcheck_stale_version(self):
isolate_file = os.path.join(self.cwd, 'x.isolate')
isolated_file = os.path.join(self.cwd, 'x.isolated')
with open(isolate_file, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'command\': [\'foo\'],'
' },'
' }],'
' ],'
'}')
self.mock(sys, 'stdout', cStringIO.StringIO())
cmd = [
'-i', isolate_file,
'-s', isolated_file,
'--config-variable', 'OS=dendy',
]
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
with open(isolate_file, 'rb') as f:
actual = f.read()
expected = (
'# Foo\n{ \'conditions\':[ [\'OS=="dendy"\', { '
'\'variables\': { \'command\': [\'foo\'], }, }], ],}')
self.assertEqual(expected, actual)
with open(isolated_file, 'rb') as f:
actual_isolated = f.read()
expected_isolated = (
'{"algo":"sha-1","command":["foo"],"files":{},'
'"read_only":1,"relative_cwd":".","version":"%s"}'
) % isolated_format.ISOLATED_FILE_VERSION
self.assertEqual(expected_isolated, actual_isolated)
isolated_data = json.loads(actual_isolated)
with open(isolated_file + '.state', 'rb') as f:
actual_isolated_state = f.read()
expected_isolated_state = (
'{"OS":"%s","algo":"sha-1","child_isolated_files":[],"command":["foo"],'
'"config_variables":{"OS":"dendy"},'
'"extra_variables":{"EXECUTABLE_SUFFIX":"%s"},"files":{},'
'"isolate_file":"x.isolate","path_variables":{},'
'"relative_cwd":".","root_dir":%s,"version":"%s"}'
) % (
sys.platform,
'.exe' if sys.platform=='win32' else '',
json.dumps(self.cwd),
isolate.SavedState.EXPECTED_VERSION)
self.assertEqual(expected_isolated_state, actual_isolated_state)
isolated_state_data = json.loads(actual_isolated_state)
# Now edit the .isolated.state file to break the version number and make
# sure it doesn't crash.
with open(isolated_file + '.state', 'wb') as f:
isolated_state_data['version'] = '100.42'
json.dump(isolated_state_data, f)
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
# Now edit the .isolated file to break the version number and make
# sure it doesn't crash.
with open(isolated_file, 'wb') as f:
isolated_data['version'] = '100.42'
json.dump(isolated_data, f)
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
# Make sure the files were regenerated.
with open(isolated_file, 'rb') as f:
actual_isolated = f.read()
self.assertEqual(expected_isolated, actual_isolated)
with open(isolated_file + '.state', 'rb') as f:
actual_isolated_state = f.read()
self.assertEqual(expected_isolated_state, actual_isolated_state)
def test_CMDcheck_new_variables(self):
# Test bug #61.
isolate_file = os.path.join(self.cwd, 'x.isolate')
isolated_file = os.path.join(self.cwd, 'x.isolated')
cmd = [
'-i', isolate_file,
'-s', isolated_file,
'--config-variable', 'OS=dendy',
]
with open(isolate_file, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'command\': [\'foo\'],'
' \'files\': [\'foo\'],'
' },'
' }],'
' ],'
'}')
with open(os.path.join(self.cwd, 'foo'), 'wb') as f:
f.write('yeah')
self.mock(sys, 'stdout', cStringIO.StringIO())
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
# Now add a new config variable.
with open(isolate_file, 'wb') as f:
f.write(
'# Foo\n'
'{'
' \'conditions\':['
' [\'OS=="dendy"\', {'
' \'variables\': {'
' \'command\': [\'foo\'],'
' \'files\': [\'foo\'],'
' },'
' }],'
' [\'foo=="baz"\', {'
' \'variables\': {'
' \'files\': [\'bar\'],'
' },'
' }],'
' ],'
'}')
with open(os.path.join(self.cwd, 'bar'), 'wb') as f:
f.write('yeah right!')
# The configuration is OS=dendy and foo=bar. So it should load both
# configurations.
self.assertEqual(
0,
isolate.CMDcheck(
optparse.OptionParser(), cmd + ['--config-variable', 'foo=bar']))
def test_CMDcheck_isolate_copied(self):
    # Note that moving the .isolate file is a different code path; this is
    # about copying the .isolate file to a new place and specifying the new
    # location on a subsequent execution.
x_isolate_file = os.path.join(self.cwd, 'x.isolate')
isolated_file = os.path.join(self.cwd, 'x.isolated')
cmd = ['-i', x_isolate_file, '-s', isolated_file]
with open(x_isolate_file, 'wb') as f:
f.write('{}')
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
self.assertTrue(os.path.isfile(isolated_file + '.state'))
with open(isolated_file + '.state', 'rb') as f:
self.assertEqual(json.load(f)['isolate_file'], 'x.isolate')
    # Copy the .isolate file under a new name.
y_isolate_file = os.path.join(self.cwd, 'Y.isolate')
shutil.copyfile(x_isolate_file, y_isolate_file)
cmd = ['-i', y_isolate_file, '-s', isolated_file]
self.assertEqual(0, isolate.CMDcheck(optparse.OptionParser(), cmd))
with open(isolated_file + '.state', 'rb') as f:
self.assertEqual(json.load(f)['isolate_file'], 'Y.isolate')
def test_CMDrun_extra_args(self):
cmd = [
'run',
'--isolate', 'blah.isolate',
'--', 'extra_args',
]
self.mock(isolate, 'load_complete_state', self.load_complete_state)
self.mock(subprocess, 'call', lambda *_, **_kwargs: 0)
self.assertEqual(0, isolate.CMDrun(optparse.OptionParser(), cmd))
def test_CMDrun_no_isolated(self):
isolate_file = os.path.join(self.cwd, 'x.isolate')
with open(isolate_file, 'wb') as f:
f.write('{"variables": {"command": ["python", "-c", "print(\'hi\')"]} }')
def expect_call(cmd, cwd):
self.assertEqual([sys.executable, '-c', "print('hi')", 'run'], cmd)
self.assertTrue(os.path.isdir(cwd))
return 0
self.mock(subprocess, 'call', expect_call)
cmd = ['run', '--isolate', isolate_file]
self.assertEqual(0, isolate.CMDrun(optparse.OptionParser(), cmd))
def clear_env_vars():
for e in ('ISOLATE_DEBUG', 'ISOLATE_SERVER'):
os.environ.pop(e, None)
if __name__ == '__main__':
fix_encoding.fix_encoding()
clear_env_vars()
test_utils.main()
| baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/tests/isolate_test.py | Python | apache-2.0 | 54,047 |
import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of Django requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install django
This will install the latest version of Django which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of Django:
$ python -m pip install "django<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
        # We also have to try with an explicit prefix of /usr/local in order
        # to catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='Django',
version=version,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
url='https://www.djangoproject.com/',
author='Django Software Foundation',
author_email='[email protected]',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
long_description=read('README.rst'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
install_requires=['pytz', 'sqlparse'],
extras_require={
"bcrypt": ["bcrypt"],
"argon2": ["argon2-cffi >= 16.1.0"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docs.djangoproject.com/',
'Funding': 'https://www.djangoproject.com/fundraising/',
'Source': 'https://github.com/django/django',
'Tracker': 'https://code.djangoproject.com/',
},
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
| fenginx/django | setup.py | Python | bsd-3-clause | 4,689 |
"""
-----------------------------------------------------------------------
dyneye
Copyright (C) Floris van Breugel, 2013.
[email protected]
Released under the GNU GPL license, Version 3
This file is part of dyneye.
dyneye is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dyneye is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
You should have received a copy of the GNU General Public
License along with dyneye. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------------------
"""
import Lie_Algebra as LA
import sympy as sp # this is a symbolic python package
import numpy as np
d,v = sp.symbols('d,v')
def r():
return v/d
dx = [d,v]
f0 = sp.Matrix([v, 0])
f1 = sp.Matrix([0, 1])
h = sp.Matrix([v/d])
print 'h(x): '
print h
print 'rank: ', LA.column_rank(h)
print
f0h = LA.directional_derivative(f0,h,dx)
print 'f0h: '
print f0h
print
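# Hand check of the directional (Lie) derivative printed above: with
# h = v/d, dh/dx = [-v/d**2, 1/d], so Lf0 h = dh/dx . f0
# = [-v/d**2, 1/d] . [v, 0]^T = -v**2/d**2, matching f0h.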
loa = LA.little_observability_algebra(f0,h,dx,0)
print 'little observability algebra (h, f0h):'
print loa
print 'rank: ', LA.column_rank(loa)
print
# now add some terms from big observability matrix:
f0f1 = LA.lie_bracket(f0,f1,dx)
Lf0f1h = LA.directional_derivative(f0f1,h,dx)
for term in Lf0f1h:
if np.sum(np.abs(term)) != 0: # remove terms that are equal to zero
loa = loa.col_join(sp.Matrix([term]))
dloa = loa.jacobian(dx)
rank = LA.column_rank(dloa)
print 'big observability algebra: '
print loa
print 'rank: ', rank
print
print 'num states: ', len(dx)
'''
RESULTS:
h(x):
[v/d]
rank: 1
f0h:
[-v**2/d**2]
little observability algebra (h, f0h):
[v/d]
rank: 1
big observability algebra:
[ v/d]
[v/d**2]
rank: 2
num states: 2
'''
| florisvb/dyneye | Theory/LieMath/dyneye_lie_observability.py | Python | gpl-3.0 | 2,035 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from os import path as p
import subprocess
try:
from setuptools import setup, find_packages, Command
except ImportError:
from distutils.core import setup, find_packages, Command
def read(filename, parent=None):
parent = (parent or __file__)
try:
with open(p.join(p.dirname(parent), filename)) as f:
return f.read()
except IOError:
return ''
def parse_requirements(filename, parent=None):
parent = (parent or __file__)
filepath = p.join(p.dirname(parent), filename)
content = read(filename, parent)
for line_number, line in enumerate(content.splitlines(), 1):
candidate = line.strip()
if candidate.startswith('-r'):
for item in parse_requirements(candidate[2:].strip(), filepath):
yield item
        elif candidate and not candidate.startswith('#'):
            # Skip blank lines and comment lines; yield requirement specs.
            yield candidate
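# Example (sketch): for a requirements.txt containing
#   -r base.txt
#   requests>=2.0
# parse_requirements('requirements.txt') yields the entries of base.txt
# followed by 'requests>=2.0'.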
try:
from _version import __version__
except ImportError:
__version__ = "0.1.a1-dirty"
class GitVersionCommand(Command):
description = 'Extract version from git label'
user_options = []
def initialize_options(self):
self.cwd = None
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
"""
Try to determine the version from git via git describe
if not possible leave the _version.py file untouched
"""
message = "Must be in package root {0}".format(self.cwd)
assert os.getcwd() == self.cwd, message
version = None
try:
version = subprocess.check_output(["git", "describe"])
            version = version[:-1]  # strip the trailing newline character
except subprocess.CalledProcessError:
pass
if version:
with open('src/_version.py', 'w') as f:
f.write("__version__ = '{0}'\n".format(version))
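# Usage sketch: `python setup.py set_git_version` (registered via cmdclass
# below) rewrites src/_version.py with the current `git describe` label,
# which the _version import above picks up on the next build.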
readme = open('README.rst').read()
setup(
name='cli_tools',
version=__version__,
description='Command Line Interface tools',
long_description=readme + '\n',
author='Volker Kempert',
author_email='[email protected]',
url='www.pixmeter.com',
package_dir={'': 'src'},
packages=find_packages(where='src', exclude=[]),
py_modules = ['_version'],
    install_requires=list(parse_requirements('requirements.txt')),
    tests_require=list(parse_requirements('dev_requirements.txt')),
license="MIT",
keywords='cli tools',
classifiers=[
'Intended Audience :: DevOps',
'License :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
entry_points={
'console_scripts': [
'find-dups=find_duplicates.cli_find_dups:main',
'filter-dups=find_duplicates.cli_filter_dups:main',
]
},
cmdclass={
'set_git_version': GitVersionCommand
},
)
| volker-kempert/python-tools | setup.py | Python | mit | 2,944 |
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="xsrc", parent_name="histogram", **kwargs):
super(XsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/histogram/_xsrc.py | Python | mit | 431 |
from catalog import Catalog
from category import Category | scruwys/wino | wino/__init__.py | Python | mit | 57 |
# -*- coding: utf-8 -*-
"""Fundamental models for rpi2caster, not depending on database.
Classes-only module. All constants come from definitions."""
import json
from collections import namedtuple, OrderedDict
from pkg_resources import resource_string as rs
# some named tuples for easier attribute addressing
WedgeLimits = namedtuple('WedgeLimits', 'shrink stretch')
Matrix = namedtuple('Matrix',
'column row units row_units code '
'comment ribbon_entry wedges')
ParsedRecord = namedtuple('ParsedRecord',
('raw signals comment column row has_signals is_char'
' uses_row_16 has_0005 has_0075 has_s_needle '
'is_pump_start is_pump_stop is_newline'))
# constant wedge definitions data
# Wedge definitions: {WEDGE_SERIES: [row1_units, row2_units...]...}
WEDGES_JSON = rs('rpi2caster.data', 'wedge_units.json').decode()
WEDGE_DEFINITIONS = json.loads(WEDGES_JSON)
WEDGE_ALIASES = ('10E: UK S527', '10L: UK S536', '11Q: UK S574',
'14E: UK S1406', '1A: UK S207', '1B: UK S209',
'1C: UK S210', '1O: UK S221', '1R: UK S223',
'1Q: UK S224', '2A: UK S233', '2Z: UK S261',
'3O: UK S275', '3Q: UK S277', '3Y: UK S286',
'4A: UK S295', '4G: UK S300', '5P: UK S327',
'5V: UK S371', '7J: UK S422', '7Z: UK S449',
'8A: UK S450', '8U: UK S409', 'TW: S535 typewriter',
'AK: EU S5', 'BO: EU S221', 'CZ: EU S336',
'A: UK S5', 'D: UK S46', 'E: UK S92',
'F: UK S94', 'G: UK S93', 'J: UK S101',
'K: UK S87', 'L: UK S96', 'M: UK S45',
'N: UK S88', 'O: UK S111', 'Q: UK S50',
'S: UK S197', 'V: UK S202', 'W: UK S205', 'X: UK S47')
class Wedge:
"""Default S5-12E wedge, unless otherwise specified"""
__slots__ = ('series', 'set_width', 'is_brit_pica', '_units')
S5 = [5, 6, 7, 8, 9, 9, 9, 10, 10, 11, 12, 13, 14, 15, 18]
definitions = WEDGE_DEFINITIONS
aliases = WEDGE_ALIASES
def __init__(self, wedge_name='', wedge_data=None):
w_data = wedge_data or {}
if wedge_name:
self.name = wedge_name
else:
self.series = w_data.get('series', '5')
self.set_width = w_data.get('set_width', 12.0)
self.is_brit_pica = w_data.get('is_brit_pica', True)
self._units = w_data.get('units', Wedge.S5)
def __str__(self):
return self.name
def __repr__(self):
return '<Wedge: {}>'.format(self.name)
def __getitem__(self, row):
return self.units[row]
def __bool__(self):
return True
@property
def pica(self):
"""Get the pica base of the wedge. Two values were used:
.1667" (old British pica, European wedges), also: DTP pica,
.1660" (new British, American), also: TeX pica."""
return 0.1667 if self.is_brit_pica else 0.166
@property
def name(self):
"""Gets a wedge name - for example S5-12.25E.
S (for stopbar) is prepended by convention.
E is appended whenever the wedge is based on pica = .1667".
"""
# Truncate the fractional part of the set width if it's an integer
set_w = self.set_width if self.set_width % 1 else int(self.set_width)
name = 'S{series}-{set_width}{european_suffix}'
return name.format(series=self.series, set_width=set_w,
european_suffix=self.is_brit_pica * 'E')
@name.setter
def name(self, wedge_name):
"""Parse the wedge name to get series, set width, unit values
and whether the wedge is British pica."""
if not wedge_name:
# use default S5
return
try:
# For countries that use comma as decimal delimiter,
# convert to point:
wedge_name = str(wedge_name).replace(',', '.').upper().strip()
# Check if this is an European wedge
# (these were based on pica = .1667" )
is_brit_pica = wedge_name.endswith('E')
# Away with the initial S, final E and any spaces before and after
# Make it work with space or dash as delimiter
wedge = wedge_name.strip('SE ').replace('-', ' ').split(' ')
series, raw_set_w = wedge
# get the set width i.e. float approximated to .25
set_w_str = ''.join(x for x in raw_set_w
if x.isnumeric() or x == '.')
set_width = float(set_w_str) // 0.25 * 0.25
if set_width > 25:
raise ValueError
# try to get the unit values, otherwise S5
units = WEDGE_DEFINITIONS.get(series, Wedge.S5)
# update the attributes
self.series, self.set_width = series, set_width
self.is_brit_pica, self.units = is_brit_pica, units
except (TypeError, AttributeError, ValueError):
# In case parsing goes wrong
raise ValueError('Cannot parse wedge name {}'.format(wedge_name))
@property
def parameters(self):
"""Gets a list of parameters"""
em_inches = round(self.pica * self.set_width / 12, 5)
parameters = OrderedDict()
parameters['Wedge designation'] = self.name
parameters['Inch width of 18 units [1em]'] = em_inches
parameters['Units per row'] = ' '.join(str(x) for x in self.units if x)
return parameters
@property
def units(self):
"""Gets the unit values for the wedge's rows"""
units = self._units or Wedge.S5
# Add 0 at the beginning so that the list can be indexed with
# row numbers - i.e. units[1] for row 1
        if units[0] != 0:
units = [0] + units
# Ensure that wedge has 0 + 16 steps - i.e. length is 17
# Keep adding the last value until we have enough
while len(units) < 17:
units.append(units[-1])
return units
@units.setter
def units(self, units):
"""Sets the wedge unit values"""
self._units = units
def units_to_inches(self, units=1):
"""Convert units to inches, based on wedge's set width and pica def"""
return round(units / 18 * self.set_width / 12 * self.pica, 4)
def inches_to_units(self, inches=1):
"""Convert inches to units"""
return round(18 * inches / self.pica * 12 / self.set_width, 2)
def get_adjustment_limits(self):
"""Get the unit adjustment limits for this wedge.
Without adjustment, the character has the same width as if the
wedges were at 3/8.
lower limit is reached at 1/1 0075 and 0005 wedge positions,
max shrink is 3/8 - 1/1 = 2/7 => 2 * 0.0075 + 7 * 0.0005 = 0.0185"
upper limit is reached at 15/15,
max stretch is 15/15 - 3/8 = 12/7 => 12 * 0.0075 + 7 * 0.0005 = 0.0935"
Constraints:
Single-cell matrices have max .18" width for safety.
Double-cell (large composition, wide characters) are .38"
Otherwise the mould opening would not be covered by the matrix,
leading to lead splashing over the diecase.
Low spaces don't have this constraint as the upper mould blade
prevents the lead from even reaching the diecase.
Calculate this to wedge set units.
Return positive integers.
"""
# upper limit based on parameters
stretch_inches = 0.0935
cap = 0.18
maximum = min(stretch_inches, cap)
# calculate adjustment range in units
shrink = int(18 * 12 * 0.0185 / self.pica / self.set_width)
stretch = int(18 * 12 * maximum / self.pica / self.set_width)
return WedgeLimits(shrink, stretch)
def corrections(self, row, units):
"""Calculate the 0075 and 0005 wedge positions for this matrix
based on the current wedge used.
matrix - a Matrix object
correction - units of self.wedge's set to add/subtract,
units - arbitrary character width in units of self.wedge's set"""
def steps(unit_width=0):
"""get a width (in .0005" steps) of a character
for a given number of units or diecase row"""
inches = unit_width / 18 * self.set_width / 12 * self.pica
# return a number of 0.0005" steps
return int(2000 * inches)
def limits_exceeded():
"""raise an error if width can't be adjusted with wedges"""
limits = self.get_adjustment_limits()
minimum = row_units - limits.shrink
maximum = row_units + limits.stretch
message = ('{} units exceed adjustment limits (min: {} / max: {})')
error_msg = message.format(units, minimum, maximum)
raise ValueError(error_msg)
# absolute width: how many .0005" steps is it?
char_width = steps(units)
# how wide would a character from the given row normally be?
# (using self.wedge, not self.diecase.wedge!)
row_units = self[row]
row_width = steps(row_units)
# calculate the difference and wedge positions
# 1 step of 0075 wedge is 15 steps of 0005; neutral positions are 3/8
# 3 * 15 + 8 = 53, so any increment/decrement is relative to this
increments = char_width - row_width + 53
# Upper limit: 15/15 => 15*15=225 + 15 = 240;
# lower limit: 1/ 1 => 1 * 15 + 1 = 16
if increments < 16 or increments > 240:
limits_exceeded()
# calculate wedge positions from the increments
pos_0005, pos_0075 = increments % 15, increments // 15
if not pos_0005:
# wedge positions cannot be zero
pos_0005, pos_0075 = 15, pos_0075 - 1
return (pos_0075, pos_0005)
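# Worked example of the wedge arithmetic above (a sketch, assuming the
# default S5-12E wedge): a 10-unit character cast from row 3, whose row
# width is normally 7 units.
#   steps(10) = int(2000 * 10/18 * 12/12 * 0.1667) = 185
#   steps(7)  = int(2000 *  7/18 * 12/12 * 0.1667) = 129
#   increments = 185 - 129 + 53 = 109  ->  0075 wedge at 109 // 15 = 7,
#   0005 wedge at 109 % 15 = 4, i.e. Wedge('S5-12E').corrections(3, 10) == (7, 4)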
class Ribbon:
"""Ribbon model for the rpi2caster casting program"""
def __init__(self, description='', diecase='', wedge='', contents=''):
self.description = description
self.diecase = diecase
self.wedge = wedge
self.contents = contents
def __iter__(self):
return iter(self.contents)
def __next__(self):
yield from self.contents
def __repr__(self):
return ('<Ribbon: {} diecase: {}, wedge: {}>'
.format(self.description, self.diecase, self.wedge))
def __bool__(self):
return bool(self.contents)
def __call__(self):
return self
@property
def parameters(self):
"""Gets a list of parameters"""
parameters = OrderedDict()
parameters['Description'] = self.description
parameters['Diecase'] = self.diecase
parameters['Wedge'] = self.wedge
return parameters
| elegantandrogyne/rpi2caster | rpi2caster/models.py | Python | gpl-2.0 | 10,884 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Test the ipv4 module.
"""
import qatest
from ipv4 import *
TEST_CASES = [
("10.1.2.3", None),
("0.0.0.0/0", None),
("164.1.2.3", None),
("192.168.1.1", None),
("192.168.1.4/32", None),
("192.168.1.5", "255.255.255.255"),
("192.168.1.1", "255.255.255.248"),
("192.168.1.1", "/27"),
("192.168.1.1/28", None),
("www.dartworks.biz", None),
("www.dartworks.biz/28", None),
("www.dartworks.biz", "255.255.255.248"),
("192.168.1.2/28", "255.255.255.0"),
(0xa1010102, None),
(0xc1020203, 0xfffff000),
("192.168.1.2", 0xffffff80),
(0,0),
(0xc0020201, "255.255.255.0")
]
class IPv4BaseTest(qatest.Test):
pass
class IPv4Test(IPv4BaseTest):
def test_method(self):
for case in TEST_CASES:
self.info("\ntest case where address=%s mask=%s" % (case[0], case[1]))
ip = IPv4(case[0], case[1])
self.info(str( ip ))
self.info("repr = %r" % (ip,))
s = ["\nraw address = %x" % (ip.address)]
s.append("raw mask = %x" % (ip.mask))
s.append("maskbits = %u" % (ip.maskbits))
s.append("network = %s" % (ip.network))
s.append("raw host = %x" % (ip.host))
s.append("raw broadcast = %x" % (ip.broadcast))
self.info("\n".join(s))
# XXX
ip = IPv4("192.168.1.1")
print ip
print ip.getStrings()
ip.mask = "255.255.255.248"
print ip
ip.address = "192.168.1.2"
print ip
ip.maskbits = 24
print ip
net = IPv4("172.22.0.0/16")
print "net is:", net
for host in ( IPv4("172.22.0.1/16"),
IPv4("172.24.0.1/16"),
IPv4("172.22.0.1/24") ):
if host in net:
print host.cidr(), "in", net.cidr()
else:
print host.cidr(), "NOT in", net.cidr()
return self.passed()
class IPv4Suite(qatest.TestSuite):
pass
def get_suite(conf):
suite = IPv4Suite(conf)
suite.add_test(IPv4Test)
return suite
def run(conf):
suite = get_suite(conf)
suite()
| xiangke/pycopia | experimental/pycopia/_unittest/test_ipv4.py | Python | lgpl-2.1 | 3,102 |
import frappe
def execute():
updated_list = []
for je in frappe.db.sql_list("""select name from `tabJournal Entry` where voucher_type='Credit Note' and docstatus < 2"""):
je = frappe.get_doc("Journal Entry", je)
original = je.total_amount
je.set_print_format_fields()
if je.total_amount != original:
updated_list.append(je.name)
je.db_set("total_amount", je.total_amount, update_modified=False)
je.db_set("total_amount_in_words", je.total_amount_in_words, update_modified=False)
print "updated jv list: ", updated_list
| ShashaQin/erpnext | erpnext/patches/v6_27/fix_total_amount_in_jv_for_credit_note.py | Python | agpl-3.0 | 545 |
def f(x):
import math
return 10*math.e**(math.log(0.5)/5.27 * x)
def radiationExposure(start, stop, step):
'''
Computes and returns the amount of radiation exposed
to between the start and stop times. Calls the
function f (defined for you in the grading script)
to obtain the value of the function at any point.
start: integer, the time at which exposure begins
stop: integer, the time at which exposure ends
step: float, the width of each rectangle. You can assume that
the step size will always partition the space evenly.
returns: float, the amount of radiation exposed to
between start and stop times.
'''
def frange(start, stop, step):
while start < stop:
yield start
start += step
value = 0
for time in frange(start, stop, step):
value += (f(time) * step)
return value
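# Cross-check (sketch): f has a closed-form integral, so the left Riemann
# sum above should approach this value as the step size shrinks.
def exactExposure(start, stop):
    import math
    k = math.log(0.5) / 5.27
    return (10.0 / k) * (math.e ** (k * stop) - math.e ** (k * start))
print exactExposure(40, 100)  # ~0.394 for the [40, 100] window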
print radiationExposure(40, 100, 1.5) | emyarod/OSS | 1_intro/6.00.1x/Week 3/Problem Set 3/radiationExposure.py | Python | mit | 935 |
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
{%- from "horizon/map.jinja" import server with context %}
{%- set app = salt['pillar.get']('horizon:server:app:'+app_name) %}
HORIZON_CONFIG = {
'dashboards': ({% if app.plugin is defined %}{% for plugin_name, plugin in app.plugin.iteritems() %}{% if plugin.get('dashboard', False) %}'{{ plugin_name }}', {% endif %}{% endfor %}{% endif %}'admin', 'settings'),
'default_dashboard': '{{ app.get('default_dashboard', 'project') }}',
'user_home': '{{ app.get('user_home', 'openstack_dashboard.views.get_user_home') }}',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "{{ app.get('help_url', 'http://docs.openstack.org') }}",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'password_autocomplete': 'on'
}
INSTALLED_APPS = (
'robotice_dashboard.dashboards.location',
'robotice_dashboard.dashboards.admin',
'robotice_auth',
'openstack_dashboard',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'compressor',
'horizon',
'openstack_auth',
'bootstrap3_datetime',
{%- if app.logging is defined %}
'raven.contrib.django.raven_compat',
{%- endif %}
)
{% include "horizon/files/horizon_settings/_local_settings.py" %}
{% include "horizon/files/horizon_settings/_horizon_settings.py" %}
| Martin819/salt-formula-horizon | horizon/files/local_settings/robotice_settings.py | Python | apache-2.0 | 1,748 |
from yowsup.structs import ProtocolTreeNode
from .notification_contact import ContactNotificationProtocolEntity
class AddContactNotificationProtocolEntity(ContactNotificationProtocolEntity):
'''
<notification offline="0" id="{{NOTIFICATION_ID}}" notify="{{NOTIFY_NAME}}" type="contacts"
t="{{TIMESTAMP}}" from="{{SENDER_JID}}">
<add jid="{{SET_JID}}"> </add>
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, offline, contactJid):
super(AddContactNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, offline)
self.setData(contactJid)
def setData(self, jid):
self.contactJid = jid
def toProtocolTreeNode(self):
node = super(AddContactNotificationProtocolEntity, self).toProtocolTreeNode()
        addNode = ProtocolTreeNode("add", {"jid": self.contactJid}, None, None)
        node.addChild(addNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = ContactNotificationProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = AddContactNotificationProtocolEntity
        addNode = node.getChild("add")
        entity.setData(addNode.getAttributeValue("jid"))
return entity | colonyhq/yowsup | yowsup/layers/protocol_contacts/protocolentities/notification_contact_add.py | Python | gpl-3.0 | 1,271 |
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <[email protected]>
# February 2012 - March 2014
# Version 0.2.9, Last change on Mar 06, 2014
# This software is distributed under the terms of BSD license.
##################################################################
# two lines are to include parent directory for testing
import sys
sys.path.append("..")
# Radius client
from libRadius import *
import datetime
import time
def create_Request():
# Create message header (empty)
REQ=HDRItem()
# Set command code
REQ.Code=dictCOMMANDname2code("Accounting-Request")
REQ.Identifier=1
auth=createZeroAuthenticator()
# Let's build Request
REQ_avps=[]
REQ_avps.append(encodeAVP('Acct-Status-Type', '\x00\x00\x00\x01'))
REQ_avps.append(encodeAVP('User-Name', 'username'))
REQ_avps.append(encodeAVP('Acct-Authentic', 1))
REQ_avps.append(encodeAVP('Framed-IP-Address', '127.0.0.1'))
REQ_avps.append(encodeAVP('Called-Station-Id', 'test.apn'))
REQ_avps.append(encodeAVP('Calling-Station-Id', '1234567891'))
REQ_avps.append(encodeAVP('NAS-IP-Address', '192.168.0.1'))
REQ_avps.append(encodeAVP('NAS-Identifier', '192.168.0.1'))
REQ_avps.append(encodeAVP('Framed-Protocol', 7))
REQ_avps.append(encodeAVP('Service-Type', 2))
REQ_avps.append(encodeAVP('NAS-Port-Type', 5))
REQ_avps.append(encodeAVP('Acct-Session-Id', 'D4C8F56173c26ae2'))
REQ_avps.append(encodeAVP('3GPP-IMSI', '111111111111111'))
REQ_avps.append(encodeAVP('3GPP-IMSI-MCC-MNC', '11111'))
REQ_avps.append(encodeAVP('3GPP-SGSN-Address', '192.168.0.1'))
REQ_avps.append(encodeAVP('3GPP-NSAPI', '6'))
REQ_avps.append(encodeAVP('3GPP-GGSN-MCC-MNC', '11111'))
REQ_avps.append(encodeAVP('3GPP-Charging-Characteristics', '0400'))
REQ_avps.append(encodeAVP('3GPP-IMEISV', '111111111111111'))
REQ_avps.append(encodeAVP('3GPP-User-Location-Info', '\x01"\xf00\x9c\xa7+_'))
REQ_avps.append(encodeAVP('3GPP-Charging-Id', 1111111111))
REQ_avps.append(encodeAVP('3GPP-PDP-Type', 0))
REQ_avps.append(encodeAVP('3GPP-CG-Address', '192.168.0.1'))
REQ_avps.append(encodeAVP('3GPP-Selection-Mode', '0'))
REQ_avps.append(encodeAVP('3GPP-Negotiated-DSCP', '\x00'))
REQ_avps.append(encodeAVP('3GPP-GPRS-QoS-Profile', '05-23921F9396D5FE74800000006400'))
msg=createWithAuthenticator(REQ,auth,REQ_avps,SECRET)
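    # Per RFC 2866, the accounting Request Authenticator is
    # MD5(Code + Identifier + Length + 16 zero octets + attributes + secret);
    # hence the zeroed authenticator built above gets filled in here.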
# msg now contains Accounting-Request as hex string
return msg
if __name__ == "__main__":
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
LoadDictionary("../dictRadius.xml")
HOST="127.0.0.1"
PORT=1813
SECRET="secret"
# Let's assume that my Radius messages will fit into 4k
MSG_SIZE=4096
###########################################################
Conn=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# socket is in blocking mode, so let's add a timeout
Conn.settimeout(5)
###########################################################
    # Create Accounting-Request (Start)
msg=create_Request()
# msg now contains Accounting-Request as hex string
logging.debug("+"*30)
print "Accounting-Request",msg
# send data
Conn.sendto(msg.decode("hex"),(HOST,PORT))
# Receive response
received = Conn.recv(MSG_SIZE)
# Process response
RES=HDRItem()
stripHdr(RES,received.encode("hex"))
radius_avps=splitMsgAVPs(RES.msg)
for avps in radius_avps:
print decodeAVP(avps)
#print radius_avps
# Normally - this is the end.
###########################################################
# And close the connection
Conn.close()
######################################################
# History
# 0.2.9 - Mar 06, 2014 - Radius accounting example added
# 0.2.8 - May 31, 2013 - Radius initial version
| dgudtsov/hsstools | pyprotosim/example/radius_acct_start_client.py | Python | gpl-3.0 | 3,972 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_None/trend_ConstantTrend/cycle_12/ar_12/test_artificial_32_None_ConstantTrend_12_12_100.py | Python | bsd-3-clause | 265 |
import pytest
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase, uses_native_versioning, create_test_cases
class JoinTableInheritanceTestCase(TestCase):
def create_models(self):
class TextItem(self.Model):
__tablename__ = 'text_item'
__versioned__ = {
'base_classes': (self.Model, )
}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
discriminator = sa.Column(
sa.Unicode(100)
)
__mapper_args__ = {
'polymorphic_on': discriminator,
'with_polymorphic': '*'
}
class Article(TextItem):
__tablename__ = 'article'
__mapper_args__ = {'polymorphic_identity': u'article'}
id = sa.Column(
sa.Integer,
sa.ForeignKey(TextItem.id),
primary_key=True
)
class BlogPost(TextItem):
__tablename__ = 'blog_post'
__mapper_args__ = {'polymorphic_identity': u'blog_post'}
id = sa.Column(
sa.Integer,
sa.ForeignKey(TextItem.id),
primary_key=True
)
self.TextItem = TextItem
self.Article = Article
self.BlogPost = BlogPost
def setup_method(self, method):
TestCase.setup_method(self, method)
self.TextItemVersion = version_class(self.TextItem)
self.ArticleVersion = version_class(self.Article)
self.BlogPostVersion = version_class(self.BlogPost)
def test_each_class_has_distinct_version_table(self):
assert self.TextItemVersion.__table__.name == 'text_item_version'
assert self.ArticleVersion.__table__.name == 'article_version'
assert self.BlogPostVersion.__table__.name == 'blog_post_version'
assert issubclass(self.ArticleVersion, self.TextItemVersion)
assert issubclass(self.BlogPostVersion, self.TextItemVersion)
def test_each_object_has_distinct_version_class(self):
article = self.Article()
blogpost = self.BlogPost()
textitem = self.TextItem()
self.session.add(article)
self.session.add(blogpost)
self.session.add(textitem)
self.session.commit()
# assert type(textitem.versions[0]) == self.TextItemVersion
assert type(article.versions[0]) == self.ArticleVersion
assert type(blogpost.versions[0]) == self.BlogPostVersion
def test_all_tables_contain_transaction_id_column(self):
tx_column = self.options['transaction_column_name']
assert tx_column in self.TextItemVersion.__table__.c
assert tx_column in self.ArticleVersion.__table__.c
assert tx_column in self.BlogPostVersion.__table__.c
def test_with_polymorphic(self):
article = self.Article()
self.session.add(article)
self.session.commit()
version_obj = self.session.query(self.TextItemVersion).first()
assert isinstance(version_obj, self.ArticleVersion)
def test_consecutive_insert_and_delete(self):
article = self.Article()
self.session.add(article)
self.session.flush()
self.session.delete(article)
self.session.commit()
def test_assign_transaction_id_to_both_parent_and_child_tables(self):
tx_column = self.options['transaction_column_name']
article = self.Article()
self.session.add(article)
self.session.commit()
assert self.session.execute(
'SELECT %s FROM article_version' % tx_column
).fetchone()[0]
assert self.session.execute(
'SELECT %s FROM text_item_version' % tx_column
).fetchone()[0]
def test_primary_keys(self):
tx_column = self.options['transaction_column_name']
table = self.TextItemVersion.__table__
assert len(table.primary_key.columns)
assert 'id' in table.primary_key.columns
assert tx_column in table.primary_key.columns
table = self.ArticleVersion.__table__
assert len(table.primary_key.columns)
assert 'id' in table.primary_key.columns
assert tx_column in table.primary_key.columns
@pytest.mark.skipif('uses_native_versioning()')
def test_updates_end_transaction_id_to_all_tables(self):
if self.options['strategy'] == 'subquery':
pytest.skip()
end_tx_column = self.options['end_transaction_column_name']
tx_column = self.options['transaction_column_name']
article = self.Article()
self.session.add(article)
self.session.commit()
article.name = u'Updated article'
self.session.commit()
assert article.versions.count() == 2
assert self.session.execute(
'SELECT %s FROM text_item_version '
'ORDER BY %s LIMIT 1' % (end_tx_column, tx_column)
).scalar()
assert self.session.execute(
'SELECT %s FROM article_version '
'ORDER BY %s LIMIT 1' % (end_tx_column, tx_column)
).scalar()
create_test_cases(JoinTableInheritanceTestCase)
class TestDeepJoinedTableInheritance(TestCase):
def create_models(self):
class Node(self.Model):
__versioned__ = {}
__tablename__ = 'node'
__mapper_args__ = dict(
polymorphic_on='type',
polymorphic_identity='node',
with_polymorphic='*',
)
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.String(30), nullable=False)
class Content(Node):
__versioned__ = {}
__tablename__ = 'content'
__mapper_args__ = {
'polymorphic_identity': 'content'
}
id = sa.Column(
sa.Integer,
sa.ForeignKey('node.id'),
primary_key=True
)
description = sa.Column(sa.UnicodeText())
class Document(Content):
__versioned__ = {}
__tablename__ = 'document'
__mapper_args__ = {
'polymorphic_identity': 'document'
}
id = sa.Column(
sa.Integer,
sa.ForeignKey('content.id'),
primary_key=True
)
body = sa.Column(sa.UnicodeText)
self.Node = Node
self.Content = Content
self.Document = Document
def test_insert(self):
document = self.Document()
self.session.add(document)
self.session.commit()
assert self.session.execute(
'SELECT COUNT(1) FROM document_version'
).scalar() == 1
assert self.session.execute(
'SELECT COUNT(1) FROM content_version'
).scalar() == 1
assert self.session.execute(
'SELECT COUNT(1) FROM node_version'
).scalar() == 1
| kvesteri/sqlalchemy-continuum | tests/inheritance/test_join_table_inheritance.py | Python | bsd-3-clause | 7,059 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Automatic cline refresher
# Created by Dagger - https://github.com/gavazquez
import ReloadCam_Main, ReloadCam_Helper
def GetVersion():
return 2
#Filename must start with Server, classname and argument must be the same!
class Satna(ReloadCam_Main.Server):
def GetUrl(self):
        # Put a breakpoint here if you want to see the real URL ;)
#http://satna4ever.no-ip.biz
realUrl = ReloadCam_Helper.Decrypt("maanpH1wfOXG4N3CmJaomKZxr7yfztydw82rYaaVt6-uoeCc7NTWp2CjnLM=")
return realUrl
def GetClines(self):
print "Now getting Satna clines!"
satnaClines = []
satnaClines.append(self.__GetSatnaCline(1))
satnaClines.append(self.__GetSatnaCline(2))
satnaClines.append(self.__GetSatnaCline(3))
satnaClines.append(self.__GetSatnaCline(4))
satnaClines.append(self.__GetSatnaCline(5))
satnaClines.append(self.__GetSatnaCline(6))
satnaClines = filter(None, satnaClines)
if len(satnaClines) == 0: print "No Satna lines retrieved"
return satnaClines
def __GetSatnaCline(self, serverNo):
htmlCode = ReloadCam_Helper.GetHtmlCode(None, self.GetUrl().format(serverNo))
cline = ReloadCam_Helper.FindStandardClineInText(htmlCode)
if cline != None and ReloadCam_Helper.TestCline(cline):
return cline
return None
| DaggerES/ReloadCam | DELETED_ReloadCam_Server_Satna.py | Python | gpl-3.0 | 1,429 |
"""
sentry.runner.commands.queues
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration
@click.group()
def queues():
"Manage Sentry queues."
@queues.command()
@click.option('-S', 'sort_size', default=False, is_flag=True, help='Sort by size.')
@click.option('-r', 'reverse', default=False, is_flag=True, help='Reverse the sort order.')
@configuration
def list(sort_size, reverse):
"List queues and their sizes."
from django.conf import settings
from sentry.monitoring.queues import backend
if backend is None:
raise click.ClickException('unknown broker type')
queues = backend.bulk_get_sizes([q.name for q in settings.CELERY_QUEUES])
if sort_size:
queues = sorted(queues, key=lambda q: (-q[1], q[0]), reverse=reverse)
else:
queues = sorted(queues, reverse=reverse)
for queue in queues:
click.echo('%s %d' % queue)
@queues.command()
@click.option('-f', '--force', default=False, is_flag=True, help='Do not prompt for confirmation.')
@click.argument('queue')
@configuration
def purge(force, queue):
"Purge all messages from a queue."
from sentry.monitoring.queues import get_queue_by_name, backend
if get_queue_by_name(queue) is None:
raise click.ClickException('unknown queue: %r' % queue)
if backend is None:
raise click.ClickException('unknown broker type')
size = backend.get_size(queue)
if size == 0:
click.echo('Queue is empty, nothing to purge', err=True)
return
if not force:
click.confirm(
'Are you sure you want to purge %d messages from the queue \'%s\'?' % (size, queue),
abort=True
)
click.echo('Poof, %d messages deleted' % backend.purge_queue(queue), err=True)
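# Example invocations (a sketch, assuming the standard `sentry` entry point):
#   sentry queues list -S          # queues sorted by size, largest first
#   sentry queues purge -f events  # drop all pending messages from `events`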
| looker/sentry | src/sentry/runner/commands/queues.py | Python | bsd-3-clause | 1,980 |
from abstract import AbstractBucket
class AmazonBucket(AbstractBucket):
"""
TODO: this class will implement a bucket for amazon cloud storage
"""
def delete_object(self, path):
        raise NotImplementedError
def list_current_dir(self):
        raise NotImplementedError | Sybrand/digital-panda | digitalpanda/bucket/amazon.py | Python | mit | 269
#!/usr/bin/python
#
# Note: This implementation is being phased out, you should instead look at
# the DynamoDB2_* examples for the new implementation.
#
from troposphere import Template, Ref, Output, Parameter
from troposphere.dynamodb import (Key, AttributeDefinition,
ProvisionedThroughput, Projection)
from troposphere.dynamodb import Table, GlobalSecondaryIndex
template = Template()
template.add_description("Create a dynamodb table with a global secondary "
"index")
# N.B. If you remove the provisioning section this works for
# LocalSecondaryIndexes as well.
readunits = template.add_parameter(Parameter(
"ReadCapacityUnits",
Description="Provisioned read throughput",
Type="Number",
Default="10",
MinValue="5",
MaxValue="10000",
ConstraintDescription="should be between 5 and 10000"
))
writeunits = template.add_parameter(Parameter(
"WriteCapacityUnits",
Description="Provisioned write throughput",
Type="Number",
Default="5",
MinValue="5",
MaxValue="10000",
ConstraintDescription="should be between 5 and 10000"
))
tableIndexName = template.add_parameter(Parameter(
"TableIndexName",
Description="Table: Primary Key Field",
Type="String",
Default="id",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
    ConstraintDescription="must contain only alphanumeric characters"
))
tableIndexDataType = template.add_parameter(Parameter(
"TableIndexDataType",
Description=" Table: Primary Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
secondaryIndexHashName = template.add_parameter(Parameter(
"SecondaryIndexHashName",
Description="Secondary Index: Primary Key Field",
Type="String",
Default="tokenType",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
    ConstraintDescription="must contain only alphanumeric characters"
))
secondaryIndexHashDataType = template.add_parameter(Parameter(
"SecondaryIndexHashDataType",
Description="Secondary Index: Primary Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
secondaryIndexRangeName = template.add_parameter(Parameter(
"refreshSecondaryIndexRangeName",
Description="Secondary Index: Range Key Field",
Type="String",
Default="tokenUpdatedTime",
AllowedPattern="[a-zA-Z0-9]*",
MinLength="1",
MaxLength="2048",
    ConstraintDescription="must contain only alphanumeric characters"
))
secondaryIndexRangeDataType = template.add_parameter(Parameter(
"SecondaryIndexRangeDataType",
Description="Secondary Index: Range Key Data Type",
Type="String",
Default="S",
AllowedPattern="[S|N|B]",
MinLength="1",
MaxLength="1",
ConstraintDescription="S for string data, N for numeric data, or B for "
"binary data"
))
GSITable = template.add_resource(Table(
"GSITable",
AttributeDefinitions=[
AttributeDefinition(Ref(tableIndexName), Ref(tableIndexDataType)),
AttributeDefinition(Ref(secondaryIndexHashName),
Ref(secondaryIndexHashDataType)),
AttributeDefinition(Ref(secondaryIndexRangeName),
Ref(secondaryIndexRangeDataType))
],
KeySchema=[
Key(Ref(tableIndexName), "HASH")
],
ProvisionedThroughput=ProvisionedThroughput(
Ref(readunits),
Ref(writeunits)
),
GlobalSecondaryIndexes=[
GlobalSecondaryIndex(
"SecondaryIndex",
[
Key(Ref(secondaryIndexHashName), "HASH"),
Key(Ref(secondaryIndexRangeName), "RANGE")
],
Projection("ALL"),
ProvisionedThroughput(
Ref(readunits),
Ref(writeunits)
)
)
]
))
template.add_output(Output(
"GSITable",
Value=Ref(GSITable),
Description="Table with a Global Secondary Index",
))
print(template.to_json())
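# Example usage (sketch, assuming a configured AWS CLI):
#   python DynamoDB_Table_With_GlobalSecondaryIndex.py > gsi_table.json
#   aws cloudformation create-stack --stack-name gsi-demo \
#       --template-body file://gsi_table.json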
| Yipit/troposphere | examples/DynamoDB_Table_With_GlobalSecondaryIndex.py | Python | bsd-2-clause | 4,388 |
# coding: utf-8
"""
DLRN API
DLRN API client
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class Params(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, max_age=None, success=None, job_id=None,
sequential_mode=None, previous_job_id=None, component=None):
"""Params - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'max_age': 'int',
'success': 'bool',
'job_id': 'str',
'sequential_mode': 'bool',
'previous_job_id': 'str',
'component': 'str'
}
self.attribute_map = {
'max_age': 'max_age',
'success': 'success',
'job_id': 'job_id',
'sequential_mode': 'sequential_mode',
'previous_job_id': 'previous_job_id',
'component': 'component'
}
self._max_age = max_age
self._success = success
self._job_id = job_id
self._sequential_mode = sequential_mode
self._previous_job_id = previous_job_id
self._component = component
@property
def max_age(self):
"""Gets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:return: The max_age of this Params.
:rtype: int
"""
return self._max_age
@max_age.setter
def max_age(self, max_age):
"""Sets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:param max_age: The max_age of this Params.
:type: int
"""
if max_age is None:
raise ValueError("Invalid value for `max_age`, must not be `None`")
if max_age is not None and max_age < 0:
raise ValueError("Invalid value for `max_age`, must be a value"
" greater than or equal to `0`")
self._max_age = max_age
@property
def success(self):
"""Gets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:return: The success of this Params.
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:param success: The success of this Params.
:type: bool
"""
self._success = success
@property
def job_id(self):
"""Gets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:return: The job_id of this Params.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:param job_id: The job_id of this Params.
:type: str
"""
self._job_id = job_id
@property
def sequential_mode(self):
"""Gets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:return: The sequential_mode of this Params.
:rtype: bool
"""
return self._sequential_mode
@sequential_mode.setter
def sequential_mode(self, sequential_mode):
"""Sets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:param sequential_mode: The sequential_mode of this Params.
:type: bool
"""
self._sequential_mode = sequential_mode
@property
def previous_job_id(self):
"""Gets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:return: The previous_job_id of this Params.
:rtype: str
"""
return self._previous_job_id
@previous_job_id.setter
def previous_job_id(self, previous_job_id):
"""Sets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:param previous_job_id: The previous_job_id of this Params.
:type: str
"""
self._previous_job_id = previous_job_id
@property
def component(self):
"""Gets the component of this Params.
additional notes
:return: The component of this Params.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this Params.
additional notes
:param component: The component of this Params.
:type: str
"""
self._component = component
def to_dict(self):
"""Returns the model properties as a dict """
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model """
return pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint` """
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal """
if not isinstance(other, Params):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal """
return not self == other
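# A minimal usage sketch (the field values are hypothetical):
#
#   params = Params(max_age=24, success=True, job_id='my-ci-job')
#   print(params.to_str())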
| javierpena/dlrnapi_client | dlrnapi_client/models/params.py | Python | apache-2.0 | 7,405 |
import pytest
from anchore_engine.db import Image, get_thread_scoped_session
from anchore_engine.services.policy_engine.engine.policy.gates.gems import (
BadVersionTrigger,
BlacklistedGemTrigger,
GemCheckGate,
NoFeedTrigger,
NotLatestTrigger,
NotOfficialTrigger,
)
from anchore_engine.subsys import logger
from tests.integration.services.policy_engine.engine.policy.gates import GateUnitTest
logger.enable_test_logging()
@pytest.mark.usefixtures("cls_fully_loaded_test_env")
class GemCheckGateTest(GateUnitTest):
gate_clazz = GemCheckGate
def setUp(self):
db = get_thread_scoped_session()
self.test_image = db.query(Image).get(
(self.test_env.get_images_named("ruby")[0][0], "0")
)
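    # Each test below follows the same pattern: initialize the trigger under
    # test, refresh the cached image row, prepare the gate's execution
    # context, evaluate the trigger against the image, and assert on how many
    # times it fired.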
def test_notofficial(self):
t, gate, test_context = self.get_initialized_trigger(
NotOfficialTrigger.__trigger_name__,
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
def test_notlatest(self):
t, gate, test_context = self.get_initialized_trigger(
NotLatestTrigger.__trigger_name__
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
def test_nofeed(self):
t, gate, test_context = self.get_initialized_trigger(
NoFeedTrigger.__trigger_name__
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertEqual(len(t.fired), 0)
def test_badversion(self):
t, gate, test_context = self.get_initialized_trigger(
BadVersionTrigger.__trigger_name__
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
def test_pkgfullmatch(self):
t, gate, test_context = self.get_initialized_trigger(
BlacklistedGemTrigger.__trigger_name__, name="json", version="2.0.2"
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 1)
t, gate, test_context = self.get_initialized_trigger(
BlacklistedGemTrigger.__trigger_name__, name="jsonify", version="2.0.2"
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
t, gate, test_context = self.get_initialized_trigger(
BlacklistedGemTrigger.__trigger_name__, name="json", version="2.0.1"
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
def test_pkgnamematch(self):
t, gate, test_context = self.get_initialized_trigger(
BlacklistedGemTrigger.__trigger_name__, name="json"
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 1)
t, gate, test_context = self.get_initialized_trigger(
BlacklistedGemTrigger.__trigger_name__, name="blah"
)
db = get_thread_scoped_session()
db.refresh(self.test_image)
test_context = gate.prepare_context(self.test_image, test_context)
t.evaluate(self.test_image, test_context)
print(("Fired: {}".format(t.fired)))
self.assertGreaterEqual(len(t.fired), 0)
| anchore/anchore-engine | tests/integration/services/policy_engine/engine/policy/gates/test_gems.py | Python | apache-2.0 | 4,814 |
"""Token object and related objects needed by the RETE algorithm."""
from collections import namedtuple
from collections.abc import Mapping
from enum import Enum
from pyknow.fact import Fact
class TokenInfo(namedtuple('_TokenInfo', ['data', 'context'])):
"""Tag agnostig version of Token with inmutable data."""
def __new__(cls, data, context):
"""Return a namedtuple instance with inmutable data."""
return super(TokenInfo, cls).__new__(cls,
frozenset(data),
frozenset(context.items()))
def to_valid_token(self):
"""Create a VALID token using this data."""
return Token.valid(self.data, dict(self.context))
def to_invalid_token(self):
"""Create an INVALID token using this data."""
return Token.invalid(self.data, dict(self.context))
class Token(namedtuple('_Token', ['tag', 'data', 'context'])):
"""Token, as described by RETE but with context."""
class TagType(Enum):
"""Types of Token TAG data."""
VALID = True
INVALID = False
def __new__(cls, tag, data, context=None):
"""
Instantiate a new Token with the given tag, data and context.
If the context is None an empty context will be generated.
- `tag`: Must be an instance of `TagType`.
- `data`: A Fact or an iterable of Facts.
- `context`: A mapping or None.
"""
if context is None:
context = {}
try:
assert isinstance(tag, cls.TagType), \
"tag must be of `Token.TagType` type"
assert (isinstance(data, Fact) or
all(isinstance(f, Fact) for f in data)), \
"data must be either Fact or iterable of Facts"
assert isinstance(context, Mapping)
except AssertionError as exc:
raise TypeError(exc) from exc
data = {data} if isinstance(data, Fact) else set(data)
self = super(Token, cls).__new__(cls, tag, data, context)
return self
def to_info(self):
"""
Create and return an instance of TokenInfo with this token.
This is useful, for example, to use this token information as a
dictionary key.
"""
return TokenInfo(self.data, self.context)
@classmethod
def valid(cls, data, context=None):
"""Shortcut to create a VALID Token."""
return cls(cls.TagType.VALID, data, context)
@classmethod
def invalid(cls, data, context=None):
"""Shortcut to create an INVALID Token."""
return cls(cls.TagType.INVALID, data, context)
def is_valid(self):
"""Test if this Token is VALID."""
return self.tag == self.TagType.VALID
def copy(self):
"""
Make a new instance of this Token.
This method makes a copy of the mutable part of the token before
making the instance.
"""
return self.__class__(self.tag, self.data.copy(), self.context.copy())
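# A minimal usage sketch (the Fact contents and context key are hypothetical):
#
#   token = Token.valid(Fact(color='red'), {'bound': 'red'})
#   assert token.is_valid()
#   info = token.to_info()  # hashable snapshot, usable as a dict key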
| buguroo/pyknow | pyknow/matchers/rete/token.py | Python | lgpl-3.0 | 3,078 |
#!/usr/bin/env python
#coding: utf-8
#Filename: pickling.py
import cPickle as p
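# Round-trip a shopping list through cPickle: dump it to a file, delete the
# in-memory copy, then load it back to show the data survived.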
shoplist = ['apple', 'mango', 'banana']
txt = 'shoplist.txt'
f = file(txt, 'w')
p.dump(shoplist, f)
f.close()
del shoplist
f = file(txt)
storedlist = p.load(f)
print storedlist | daya-prac/Python-prac | python/pickling.py | Python | gpl-2.0 | 259 |
# Copyright 2011 David Malcolm <[email protected]>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from collections import namedtuple
from cpybuilder import camel_case
class TreeType(namedtuple('TreeType', 'SYM, STRING, TYPE, NARGS')):
def camel_cased_string(self):
return camel_case(self.STRING)
# "type" seems to be an "enum_tree_code_class"; see GCC's tree.h
def iter_tree_types():
import re
f = open('autogenerated-tree-types.txt')
for line in f:
# e.g.
# ERROR_MARK, "error_mark", tcc_exceptional, 0
m = re.match('(.+), (.+), (.+), (.+)', line)
if m:
yield TreeType(SYM=m.group(1),
STRING=m.group(2)[1:-1],
TYPE=m.group(3),
NARGS=int(m.group(4)))
else:
# print 'UNMATCHED: ', line
assert(line.startswith('#') or line.strip() == '')
f.close()
class GimpleType(namedtuple('GimpleType', 'gimple_symbol printable_name gss_symbol')):
def camel_cased_string(self):
return camel_case(self.gimple_symbol)
def iter_gimple_types():
import re
f = open('autogenerated-gimple-types.txt')
for line in f:
# e.g.
# GIMPLE_ERROR_MARK, "gimple_error_mark", GSS_BASE
m = re.match('(GIMPLE_.+), (.+), (.+)', line)
if m:
yield GimpleType(gimple_symbol=m.group(1),
printable_name=m.group(2)[1:-1],
gss_symbol=m.group(3))
else:
# print 'UNMATCHED: ', line
assert(line.startswith('#') or line.strip() == '' or line.startswith('GSS_'))
f.close()
# Corresponds to a DEFGSSTRUCT macro from gsstruct.def
class GimpleStructType(namedtuple('GimpleStructType', 'enum_value struct_name has_tree_operands')):
def camel_cased_string(self):
return camel_case(self.struct_name)
def iter_gimple_struct_types():
import re
f = open('autogenerated-gimple-types.txt')
for line in f:
# e.g.
# GSS_BASE, gimple_statement_base, false
m = re.match('(GSS_.+), (.+), (.+)', line)
if m:
yield GimpleStructType(enum_value=m.group(1),
struct_name=m.group(2),
has_tree_operands=m.group(3))
else:
# print 'UNMATCHED: ', line
assert(line.startswith('#') or line.strip() == '' or line.startswith('GIMPLE_'))
f.close()
# Corresponds to a DEF_RTL_EXPR macro from rtl.def
class DefRtlExpr(namedtuple('DefRtlExpr', 'ENUM, NAME, FORMAT, CLASS')):
def camel_cased_string(self):
return 'Rtl' + camel_case(self.ENUM)
def iter_rtl_expr_types():
import re
f = open('autogenerated-rtl-types.txt')
for line in f:
# e.g.
# rtl_expr: DEBUG_EXPR, "debug_expr", "0", RTX_OBJ
m = re.match('rtl_expr: (.+), "(.+)", (.*), (.+)', line)
if m:
#print m.groups()
yield DefRtlExpr(ENUM=m.group(1),
NAME=m.group(2),
FORMAT=m.group(3),
CLASS=m.group(4))
else:
#print 'UNMATCHED: %r' % line
assert(line.startswith('#') or line.strip() == '' or line.startswith('GIMPLE_'))
f.close()
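# A minimal usage sketch (assumes the autogenerated-*.txt input files are
# present in the working directory):
#
#   for tt in iter_tree_types():
#       print tt.SYM, tt.camel_cased_string()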
| jasonxmueller/gcc-python-plugin | maketreetypes.py | Python | gpl-3.0 | 4,002 |
from django import template
from django.urls import reverse
from django.apps import apps as django_apps
from django.urls.exceptions import NoReverseMatch
register = template.Library()
class ContinuationAppointmentUrlError(Exception):
pass
class ContinuationAppointmentAnchor(template.Node):
"""Returns a reverse url for a continuation appointment
if the appointment does not already exist.
"""
def __init__(self, appointment, dashboard_type, extra_url_context):
self.unresolved_appointment = template.Variable(appointment)
self.unresolved_dashboard_type = template.Variable(dashboard_type)
self.unresolved_extra_url_context = template.Variable(
extra_url_context)
@property
def appointment_model(self):
return django_apps.get_app_config('edc_appointment').appointment_model
def render(self, context):
self.appointment = self.unresolved_appointment.resolve(context)
self.dashboard_type = self.unresolved_dashboard_type.resolve(context)
self.registered_subject = self.appointment.registered_subject
self.extra_url_context = self.unresolved_extra_url_context
if not self.extra_url_context:
self.extra_url_context = ''
# does a continuation appointment exist? instance will be instance+1
visit_code_sequences = []
for appointment in self.appointment_model.objects.filter(
registered_subject=self.appointment.registered_subject):
visit_code_sequences.append(int(appointment.visit_code_sequence))
if (int(self.appointment.visit_code_sequence) + 1) in visit_code_sequences:
anchor = ''
else:
            view = (f'admin:{self.appointment._meta.app_label}_'
                    f'{self.appointment._meta.model_name}_add')
            try:
                url = reverse(view)
            except NoReverseMatch as e:
                raise ContinuationAppointmentUrlError(
                    'ContinuationAppointmentUrl Tag: NoReverseMatch while '
                    f'rendering reverse for {self.appointment._meta.model_name}. '
                    f'Is model registered in admin? Got {e}.')
else:
# TODO: resolve error when using extra_url_context...give back
# variable name ???
visit_code_sequence = str(
int(self.appointment.visit_code_sequence) + 1)
rev_url = (
f'{url}?next=dashboard_url&dashboard_type={self.dashboard_type}'
f'®istered_subject={self.appointment.registered_subject.pk}'
f'&visit_definition={self.appointment.visit_definition.pk}'
f'&visit_code_sequence={visit_code_sequence}')
anchor = f'<A href="{rev_url}">continuation</A>'
return anchor
@register.tag(name='continuation_appointment_anchor')
def continuation_appointment_anchor(parser, token):
"""Compilation function for renderer ContinuationAppointmentUrl.
"""
try:
_, appointment, dashboard_type, extra_url_context = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires exactly 3 arguments" % token.contents.split()[0])
return ContinuationAppointmentAnchor(appointment, dashboard_type, extra_url_context)
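# A minimal template usage sketch (the variable names are hypothetical):
#
#   {% load appointment_tags %}
#   {% continuation_appointment_anchor appointment dashboard_type extra_url_context %}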
@register.filter(name='appt_type')
def appt_type(value):
"""Filters appointment.appt_type.
"""
if value == 'clinic':
retval = 'Clin'
elif value == 'telephone':
retval = 'Tele'
elif value == 'home':
retval = 'Home'
else:
retval = None
return retval
| botswana-harvard/edc-appointment | edc_appointment/templatetags/appointment_tags.py | Python | gpl-2.0 | 3,699 |
# -*- coding: utf-8 -*-
import unittest, cleancss
from textwrap import dedent
from StringIO import StringIO
class TestConvert(unittest.TestCase):
def test_01_convert(self):
ccss = StringIO()
ccss.write(dedent('''
// Comment
#header, #footer:
margin: 0
padding: 0
font->
family: Verdana, sans-serif
size: .9em // Comment
li:
padding: 0.4em// Comment
margin: 0.8em 0 0.8em
a:
background-image: url('abc.png')
&:hover:
background-color: red
h3:
background-image: url('http://test.com/abc.png')
font-size: 1.2em
p, div.p:
padding: 0.3em
p.meta:
text-align: right
color: #ddd
'''))
expected_result = dedent('''
#header,
#footer {
margin: 0;
padding: 0;
font-family: Verdana, sans-serif;
font-size: .9em;
}
#header li,
#footer li {
padding: 0.4em;
margin: 0.8em 0 0.8em;
}
#header li a,
#footer li a {
background-image: url('abc.png');
}
#header li a:hover,
#footer li a:hover {
background-color: red;
}
#header li h3,
#footer li h3 {
background-image: url('http://test.com/abc.png');
font-size: 1.2em;
}
#header li p,
#header li div.p,
#footer li p,
#footer li div.p {
padding: 0.3em;
}
#header li p.meta,
#footer li p.meta {
text-align: right;
color: #ddd;
}
''').lstrip().replace(" ", "\t")
ccss.seek(0)
self.assertEqual(cleancss.convert(ccss), expected_result)
def test_02_callback(self):
def callback(prop, value):
return [
(prop + "-variant", value)
]
ccss = StringIO()
ccss.write(dedent('''
#header, #footer:
margin: 0
padding: 0
'''))
expected_result = dedent('''
#header,
#footer {
margin-variant: 0;
padding-variant: 0;
}
''').lstrip().replace(" ", "\t")
c = cleancss.Parser(ccss)
c.registerPropertyCallback( callback )
ccss.seek(0)
self.assertEqual(c.toCss(), expected_result)
ccss.seek(0)
self.assertEqual(cleancss.convert(ccss, callback), expected_result)
def test_03_variants_callback(self):
ccss = StringIO()
ccss.write(dedent('''
#header, #footer:
border-radius: 5px
padding: 0
text-overflow: ellipsis
display: box
border-top-left-radius: 5px
-ms-filter: "test"
opacity: 0.5
opacity: notvalid
background: linear-gradient(top left, #000 0%, #FFF 100%)
'''))
expected_result = dedent('''
#header,
#footer {
border-radius: 5px;
-o-border-radius: 5px;
-moz-border-radius: 5px;
-webkit-border-radius: 5px;
padding: 0;
text-overflow: ellipsis;
-o-text-overflow: ellipsis;
display: box;
display: -moz-box;
display: -webkit-box;
border-top-left-radius: 5px;
-moz-border-radius-topleft: 5px;
-webkit-border-top-left-radius: 5px;
-ms-filter: "test";
filter: test;
opacity: 0.5;
filter: alpha(opacity=50);
opacity: notvalid;
background: linear-gradient(top left, #000 0%, #FFF 100%);
background: -o-linear-gradient(top left, #000 0%, #FFF 100%);
background: -moz-linear-gradient(top left, #000 0%, #FFF 100%);
background: -webkit-linear-gradient(top left, #000 0%, #FFF 100%);
}
        ''').lstrip().replace("    ", "\t")
        ccss.seek(0)
        self.assertEqual(cleancss.convert(ccss, cleancss.callbacks.browser_variants), expected_result)
def test_04_multiline_comments(self):
ccss = StringIO()
ccss.write(dedent('''body:
/* Hi // there */
color: /* inline comment! */ red
/* Multi-
line */ // and a comment
'''))
expected_result = dedent('''
body {
color: red;
}
''').lstrip().replace(" ", "\t")
ccss.seek(0)
        self.assertEqual(cleancss.convert(ccss), expected_result)
if __name__ == "__main__":
unittest.main() | mtorromeo/py-cleancss | tests/__init__.py | Python | bsd-2-clause | 5,236 |
def main():
words = str(input("Enter a English word: ")).split()
for word in words:
print(word[1:] + word[0] + "ay", end = " ")
print ()
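# Example run (the input is hypothetical): "hello world" -> "ellohay orldway"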
main() | cynthiacarter/Week-Four-Assignment | idk.py | Python | mit | 164 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for instance views.
"""
import boto.ec2.instance
import mox
from django.core.urlresolvers import reverse
from django_nova.tests.view_tests.base import BaseProjectViewTests, TEST_PROJECT
TEST_INSTANCE_ID = 'i-abcdefgh'
class InstanceViewTests(BaseProjectViewTests):
def test_index(self):
self.mox.StubOutWithMock(self.project, 'get_instances')
self.project.get_instances().AndReturn([])
self.mox.ReplayAll()
res = self.client.get(reverse('nova_instances', args=[TEST_PROJECT]))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'django_nova/instances/index.html')
self.assertEqual(len(res.context['instances']), 0)
self.mox.VerifyAll()
def test_detail(self):
instance = boto.ec2.instance.Instance()
instance.id = TEST_INSTANCE_ID
instance.displayName = instance.id
instance.displayDescription = instance.id
self.mox.StubOutWithMock(self.project, 'get_instance')
self.project.get_instance(instance.id).AndReturn(instance)
self.mox.StubOutWithMock(self.project, 'get_instances')
self.project.get_instances().AndReturn([instance])
self.mox.ReplayAll()
res = self.client.get(reverse('nova_instances_detail',
args=[TEST_PROJECT, TEST_INSTANCE_ID]))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'django_nova/instances/index.html')
self.assertEqual(res.context['selected_instance'].id, instance.id)
self.mox.VerifyAll()
| sleepsonthefloor/openstack-dashboard | django-nova/src/django_nova/tests/view_tests/instance_tests.py | Python | apache-2.0 | 2,367 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlTryTiny(PerlPackage):
"""Minimal try/catch with proper preservation of $@"""
homepage = "http://search.cpan.org/~ether/Try-Tiny-0.28/lib/Try/Tiny.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Try-Tiny-0.28.tar.gz"
version('0.28', 'e2f8af601a62981aab30df15a6f47475')
| EmreAtes/spack | var/spack/repos/builtin/packages/perl-try-tiny/package.py | Python | lgpl-2.1 | 1,571 |
#!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
import sys
import wx
from optparse import OptionParser
from gnuradio import gr, audio, blks2, uhd
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, scopesink2, slider, form
from numpy import convolve, array
#import os
#print "pid =", os.getpid()
#raw_input('Press Enter to continue: ')
# ////////////////////////////////////////////////////////////////////////
# Control Stuff
# ////////////////////////////////////////////////////////////////////////
class ptt_block(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__ (self, frame, panel, vbox, argv)
self.frame = frame
self.space_bar_pressed = False
parser = OptionParser (option_class=eng_option)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option ("-f", "--freq", type="eng_float", default=442.1e6,
help="set Tx and Rx frequency to FREQ", metavar="FREQ")
parser.add_option ("-g", "--rx-gain", type="eng_float", default=None,
help="set rx gain [default=midpoint in dB]")
parser.add_option ("", "--tx-gain", type="eng_float", default=None,
help="set tx gain [default=midpoint in dB]")
parser.add_option("-I", "--audio-input", type="string", default="",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option ("-N", "--no-gui", action="store_true", default=False)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
sys.exit(1)
if options.freq < 1e6:
options.freq *= 1e6
self.txpath = transmit_path(options.args, options.spec,
options.antenna, options.tx_gain,
options.audio_input)
self.rxpath = receive_path(options.args, options.spec,
options.antenna, options.rx_gain,
options.audio_output)
self.connect(self.txpath)
self.connect(self.rxpath)
self._build_gui(frame, panel, vbox, argv, options.no_gui)
self.set_transmit(False)
self.set_freq(options.freq)
self.set_rx_gain(self.rxpath.gain) # update gui
self.set_volume(self.rxpath.volume) # update gui
self.set_squelch(self.rxpath.threshold()) # update gui
def set_transmit(self, enabled):
self.txpath.set_enable(enabled)
self.rxpath.set_enable(not(enabled))
if enabled:
self.frame.SetStatusText ("Transmitter ON", 1)
else:
self.frame.SetStatusText ("Receiver ON", 1)
def set_rx_gain(self, gain):
self.myform['rx_gain'].set_value(gain) # update displayed value
self.rxpath.set_gain(gain)
def set_tx_gain(self, gain):
self.txpath.set_gain(gain)
def set_squelch(self, threshold):
self.rxpath.set_squelch(threshold)
self.myform['squelch'].set_value(self.rxpath.threshold())
def set_volume (self, vol):
self.rxpath.set_volume(vol)
self.myform['volume'].set_value(self.rxpath.volume)
#self.update_status_bar ()
def set_freq(self, freq):
r1 = self.txpath.set_freq(freq)
r2 = self.rxpath.set_freq(freq)
#print "txpath.set_freq =", r1
#print "rxpath.set_freq =", r2
if r1 and r2:
self.myform['freq'].set_value(freq) # update displayed value
return r1 and r2
def _build_gui(self, frame, panel, vbox, argv, no_gui):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
self.panel = panel
# FIXME This REALLY needs to be replaced with a hand-crafted button
# that sends both button down and button up events
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((10,0), 1)
self.status_msg = wx.StaticText(panel, -1, "Press Space Bar to Transmit")
of = self.status_msg.GetFont()
self.status_msg.SetFont(wx.Font(15, of.GetFamily(), of.GetStyle(), of.GetWeight()))
hbox.Add(self.status_msg, 0, wx.ALIGN_CENTER)
hbox.Add((10,0), 1)
vbox.Add(hbox, 0, wx.EXPAND | wx.ALIGN_CENTER)
panel.Bind(wx.EVT_KEY_DOWN, self._on_key_down)
panel.Bind(wx.EVT_KEY_UP, self._on_key_up)
panel.Bind(wx.EVT_KILL_FOCUS, self._on_kill_focus)
panel.SetFocus()
if 1 and not(no_gui):
rx_fft = fftsink2.fft_sink_c(panel, title="Rx Input", fft_size=512,
sample_rate=self.rxpath.if_rate,
ref_level=80, y_per_div=20)
self.connect (self.rxpath.u, rx_fft)
vbox.Add (rx_fft.win, 1, wx.EXPAND)
if 1 and not(no_gui):
rx_fft = fftsink2.fft_sink_c(panel, title="Post s/w Resampler",
fft_size=512, sample_rate=self.rxpath.quad_rate,
ref_level=80, y_per_div=20)
self.connect (self.rxpath.resamp, rx_fft)
vbox.Add (rx_fft.win, 1, wx.EXPAND)
if 0 and not(no_gui):
foo = scopesink2.scope_sink_f(panel, title="Squelch",
sample_rate=32000)
self.connect (self.rxpath.fmrx.div, (foo,0))
self.connect (self.rxpath.fmrx.gate, (foo,1))
self.connect (self.rxpath.fmrx.squelch_lpf, (foo,2))
vbox.Add (foo.win, 1, wx.EXPAND)
if 0 and not(no_gui):
tx_fft = fftsink2.fft_sink_c(panel, title="Tx Output",
fft_size=512, sample_rate=self.txpath.usrp_rate)
self.connect (self.txpath.amp, tx_fft)
vbox.Add (tx_fft.win, 1, wx.EXPAND)
# add control area at the bottom
self.myform = myform = form.form()
# first row
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0, 0)
myform['freq'] = form.float_field(
parent=panel, sizer=hbox, label="Freq", weight=1,
callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
hbox.Add((5,0), 0, 0)
vbox.Add(hbox, 0, wx.EXPAND)
# second row
hbox = wx.BoxSizer(wx.HORIZONTAL)
myform['volume'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
weight=3, range=self.rxpath.volume_range(),
callback=self.set_volume)
hbox.Add((5,0), 0)
myform['squelch'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Squelch",
weight=3, range=self.rxpath.squelch_range(),
callback=self.set_squelch)
g = self.rxpath.u.get_gain_range()
hbox.Add((5,0), 0)
myform['rx_gain'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Rx Gain",
weight=3, range=(g.start(), g.stop(), g.step()),
callback=self.set_rx_gain)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
self._build_subpanel(vbox)
def _build_subpanel(self, vbox_arg):
# build a secondary information panel (sometimes hidden)
# FIXME figure out how to have this be a subpanel that is always
# created, but has its visibility controlled by foo.Show(True/False)
#if not(self.show_debug_info):
# return
panel = self.panel
vbox = vbox_arg
myform = self.myform
#panel = wx.Panel(self.panel, -1)
#vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
#myform['decim'] = form.static_float_field(
# parent=panel, sizer=hbox, label="Decim")
#hbox.Add((5,0), 1)
#myform['fs@usb'] = form.static_float_field(
# parent=panel, sizer=hbox, label="Fs@USB")
#hbox.Add((5,0), 1)
#myform['dbname'] = form.static_text_field(
# parent=panel, sizer=hbox)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
def _set_status_msg(self, msg, which=0):
self.frame.GetStatusBar().SetStatusText(msg, which)
def _on_key_down(self, evt):
# print "key_down:", evt.m_keyCode
if evt.m_keyCode == wx.WXK_SPACE and not(self.space_bar_pressed):
self.space_bar_pressed = True
self.set_transmit(True)
def _on_key_up(self, evt):
# print "key_up", evt.m_keyCode
if evt.m_keyCode == wx.WXK_SPACE:
self.space_bar_pressed = False
self.set_transmit(False)
def _on_kill_focus(self, evt):
# if we lose the keyboard focus, turn off the transmitter
self.space_bar_pressed = False
self.set_transmit(False)
# ////////////////////////////////////////////////////////////////////////
# Transmit Path
# ////////////////////////////////////////////////////////////////////////
class transmit_path(gr.hier_block2):
def __init__(self, args, spec, antenna, gain, audio_input):
gr.hier_block2.__init__(self, "transmit_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
self.u = uhd.usrp_sink(device_addr=args, stream_args=uhd.stream_args('fc32'))
# Set the subdevice spec
if(spec):
self.u.set_subdev_spec(spec, 0)
# Set the antenna
if(antenna):
self.u.set_antenna(antenna, 0)
self.if_rate = 320e3
self.audio_rate = 32e3
self.u.set_samp_rate(self.if_rate)
dev_rate = self.u.get_samp_rate()
self.audio_gain = 10
self.normal_gain = 32000
self.audio = audio.source(int(self.audio_rate), audio_input)
self.audio_amp = gr.multiply_const_ff(self.audio_gain)
lpf = gr.firdes.low_pass (1, # gain
self.audio_rate, # sampling rate
3800, # low pass cutoff freq
300, # width of trans. band
gr.firdes.WIN_HANN) # filter type
hpf = gr.firdes.high_pass (1, # gain
self.audio_rate, # sampling rate
325, # low pass cutoff freq
50, # width of trans. band
gr.firdes.WIN_HANN) # filter type
audio_taps = convolve(array(lpf),array(hpf))
self.audio_filt = gr.fir_filter_fff(1,audio_taps)
self.pl = blks2.ctcss_gen_f(self.audio_rate,123.0)
self.add_pl = gr.add_ff()
self.connect(self.pl,(self.add_pl,1))
self.fmtx = blks2.nbfm_tx(self.audio_rate, self.if_rate)
self.amp = gr.multiply_const_cc (self.normal_gain)
rrate = dev_rate / self.if_rate
self.resamp = blks2.pfb_arb_resampler_ccf(rrate)
self.connect(self.audio, self.audio_amp, self.audio_filt,
(self.add_pl,0), self.fmtx, self.amp,
self.resamp, self.u)
if gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
gain = float(g.start() + g.stop())/2.0
self.set_gain(gain)
self.set_enable(False)
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
@param target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq)
if r:
return True
return False
def set_gain(self, gain):
self.gain = gain
self.u.set_gain(gain)
def set_enable(self, enable):
if enable:
self.amp.set_k (self.normal_gain)
else:
self.amp.set_k (0)
# ////////////////////////////////////////////////////////////////////////
# Receive Path
# ////////////////////////////////////////////////////////////////////////
class receive_path(gr.hier_block2):
    def __init__(self, args, spec, antenna, gain, audio_output):
gr.hier_block2.__init__(self, "receive_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
self.u = uhd.usrp_source(device_addr=args,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1)
self.if_rate = 256e3
self.quad_rate = 64e3
self.audio_rate = 32e3
self.u.set_samp_rate(self.if_rate)
dev_rate = self.u.get_samp_rate()
# Create filter to get actual channel we want
nfilts = 32
chan_coeffs = gr.firdes.low_pass (nfilts, # gain
nfilts*dev_rate, # sampling rate
13e3, # low pass cutoff freq
4e3, # width of trans. band
gr.firdes.WIN_HANN) # filter type
rrate = self.quad_rate / dev_rate
self.resamp = blks2.pfb_arb_resampler_ccf(rrate, chan_coeffs, nfilts)
# instantiate the guts of the single channel receiver
self.fmrx = blks2.nbfm_rx(self.audio_rate, self.quad_rate)
# standard squelch block
self.squelch = blks2.standard_squelch(self.audio_rate)
# audio gain / mute block
self._audio_gain = gr.multiply_const_ff(1.0)
# sound card as final sink
audio_sink = audio.sink (int(self.audio_rate), audio_output)
# now wire it all together
self.connect (self.u, self.resamp, self.fmrx, self.squelch,
self._audio_gain, audio_sink)
if gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
gain = float(g.start() + g.stop())/2.0
self.enabled = True
self.set_gain(gain)
v = self.volume_range()
self.set_volume((v[0]+v[1])/2)
s = self.squelch_range()
self.set_squelch((s[0]+s[1])/2)
# Set the subdevice spec
if(spec):
self.u.set_subdev_spec(spec, 0)
# Set the antenna
if(antenna):
self.u.set_antenna(antenna, 0)
def volume_range(self):
return (-20.0, 0.0, 0.5)
def set_volume (self, vol):
g = self.volume_range()
self.volume = max(g[0], min(g[1], vol))
self._update_audio_gain()
def set_enable(self, enable):
self.enabled = enable
self._update_audio_gain()
def _update_audio_gain(self):
if self.enabled:
self._audio_gain.set_k(10**(self.volume/10))
else:
self._audio_gain.set_k(0)
def squelch_range(self):
return self.squelch.squelch_range()
def set_squelch(self, threshold):
print "SQL =", threshold
self.squelch.set_threshold(threshold)
def threshold(self):
return self.squelch.threshold()
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
@param target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq)
if r:
return True
return False
def set_gain(self, gain):
self.gain = gain
self.u.set_gain(gain)
# ////////////////////////////////////////////////////////////////////////
# Main
# ////////////////////////////////////////////////////////////////////////
def main():
app = stdgui2.stdapp(ptt_block, "NBFM Push to Talk")
app.MainLoop()
if __name__ == '__main__':
main()
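# A minimal invocation sketch (the frequency and gain values are
# hypothetical):
#
#   ./usrp_nbfm_ptt.py -f 446.0e6 -g 20 --tx-gain 15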
| owaiskhan/Retransmission-Combining | gr-uhd/examples/usrp_nbfm_ptt.py | Python | gpl-3.0 | 17,742 |
import django.views.generic as base_generic
from core.mixins import LoginPermissionRequiredMixin, update_permissions
from core.views import generic
from django.core.urlresolvers import reverse
from jsonattrs.mixins import JsonAttrsMixin, template_xlang_labels
from organization.views import mixins as organization_mixins
from party.messages import TENURE_REL_CREATE
from resources.forms import AddResourceFromLibraryForm
from resources.views import mixins as resource_mixins
from questionnaires.models import Question, QuestionOption
from . import mixins
from .. import messages as error_messages
from .. import forms
class LocationsList(LoginPermissionRequiredMixin,
mixins.SpatialQuerySetMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.ListView):
template_name = 'spatial/location_map.html'
permission_required = 'spatial.list'
permission_denied_message = error_messages.SPATIAL_LIST
def get_perms_objects(self):
return [self.get_project()]
class LocationsAdd(LoginPermissionRequiredMixin,
mixins.SpatialQuerySetMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.CreateView):
form_class = forms.LocationForm
template_name = 'spatial/location_add.html'
permission_required = update_permissions('spatial.add')
permission_denied_message = error_messages.SPATIAL_CREATE
def get(self, request, *args, **kwargs):
referrer = request.META.get('HTTP_REFERER', None)
if referrer:
current_url = reverse('locations:add', kwargs=self.kwargs)
if current_url not in referrer:
request.session['cancel_add_location_url'] = referrer
else:
# In case the browser does not send any referrer
request.session['cancel_add_location_url'] = reverse(
'organization:project-dashboard', kwargs=self.kwargs)
return super().get(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
cancel_url = self.request.session.get('cancel_add_location_url', None)
context['cancel_url'] = cancel_url or reverse(
'organization:project-dashboard', kwargs=self.kwargs)
return context
def get_perms_objects(self):
return [self.get_project()]
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['project'] = self.get_project()
return kwargs
class LocationDetail(LoginPermissionRequiredMixin,
JsonAttrsMixin,
mixins.SpatialUnitObjectMixin,
organization_mixins.ProjectAdminCheckMixin,
resource_mixins.DetachableResourcesListMixin,
generic.DetailView):
template_name = 'spatial/location_detail.html'
permission_required = 'spatial.view'
permission_denied_message = error_messages.SPATIAL_VIEW
attributes_field = 'attributes'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['relationships'] = self.object.tenurerelationship_set.all(
).select_related('party').defer('party__attributes')
project = context['object']
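        # When the project has a custom questionnaire, resolve translated
        # labels for the location type and the tenure types so the template
        # can render them in the user's language.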
if project.current_questionnaire:
try:
question = Question.objects.get(
name='location_type',
questionnaire_id=project.current_questionnaire)
context['type_labels'] = template_xlang_labels(
question.label_xlat)
except Question.DoesNotExist:
pass
else:
try:
option = QuestionOption.objects.get(
question=question,
name=context['location'].type)
context['type_choice_labels'] = template_xlang_labels(
option.label_xlat)
except QuestionOption.DoesNotExist:
pass
try:
tenure_type = Question.objects.get(
name='tenure_type',
questionnaire_id=project.current_questionnaire)
tenure_opts = QuestionOption.objects.filter(
question=tenure_type)
tenure_opts = dict(tenure_opts.values_list(
'name', 'label_xlat'))
for rel in context['relationships']:
rel.type_labels = template_xlang_labels(
tenure_opts.get(rel.tenure_type))
except Question.DoesNotExist:
pass
location = context['location']
user = self.request.user
context['is_allowed_edit_location'] = user.has_perm('spatial.update',
location)
context['is_allowed_delete_location'] = user.has_perm('spatial.delete',
location)
return context
class LocationEdit(LoginPermissionRequiredMixin,
mixins.SpatialUnitObjectMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.UpdateView):
template_name = 'spatial/location_edit.html'
form_class = forms.LocationForm
permission_required = update_permissions('spatial.edit')
permission_denied_message = error_messages.SPATIAL_UPDATE
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['project'] = self.get_project()
return kwargs
class LocationDelete(LoginPermissionRequiredMixin,
mixins.SpatialUnitObjectMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.DeleteView):
template_name = 'spatial/location_delete.html'
permission_required = update_permissions('spatial.delete')
permission_denied_message = error_messages.SPATIAL_DELETE
def get_success_url(self):
kwargs = self.kwargs
del kwargs['location']
return reverse('locations:list', kwargs=self.kwargs)
class LocationResourceAdd(LoginPermissionRequiredMixin,
mixins.SpatialUnitResourceMixin,
base_generic.edit.FormMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.DetailView):
template_name = 'spatial/resources_add.html'
form_class = AddResourceFromLibraryForm
permission_required = update_permissions('spatial.resources.add')
permission_denied_message = error_messages.SPATIAL_ADD_RESOURCE
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
form.save()
return self.form_valid(form)
class LocationResourceNew(LoginPermissionRequiredMixin,
mixins.SpatialUnitResourceMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.CreateView):
template_name = 'spatial/resources_new.html'
permission_required = update_permissions('spatial.resources.add')
permission_denied_message = error_messages.SPATIAL_ADD_RESOURCE
class TenureRelationshipAdd(LoginPermissionRequiredMixin,
mixins.SpatialUnitRelationshipMixin,
organization_mixins.ProjectAdminCheckMixin,
generic.CreateView):
template_name = 'spatial/relationship_add.html'
form_class = forms.TenureRelationshipForm
permission_required = update_permissions('tenure_rel.create')
permission_denied_message = TENURE_REL_CREATE
def get_perms_objects(self):
return [self.get_project()]
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['initial'] = {
'new_entity': not self.get_project().parties.exists(),
}
return kwargs
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
return context
def get_success_url(self):
return (reverse('locations:detail', kwargs=self.kwargs) +
'#relationships')
| Cadasta/cadasta-platform | cadasta/spatial/views/default.py | Python | agpl-3.0 | 8,398 |
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen, Uoti Urpala
import os
import sys
import socket
import signal
import struct
import thread
from bisect import insort
from cStringIO import StringIO
from traceback import print_exc
from errno import EWOULDBLOCK, ENOBUFS, EINTR
from BitTorrent.platform import bttime
from BitTorrent import WARNING, CRITICAL, FAQ_URL
from BitTorrent.defer import Deferred
try:
from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1000
except ImportError:
from BitTorrent.selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
timemult = 1
NOLINGER = struct.pack('ii', 1, 0)
class Handler(object):
# there is only a semantic difference between "made" and "started".
# I prefer "started"
def connection_started(self, s):
self.connection_made(s)
def connection_made(self, s):
pass
def connection_lost(self, s):
pass
# Maybe connection_lost should just have a default 'None' exception parameter
def connection_failed(self, addr, exception):
pass
def connection_flushed(self, s):
pass
def data_came_in(self, addr, datagram):
pass
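# A minimal Handler subclass sketch (the class name is hypothetical; for TCP
# connections data_came_in receives the SingleSocket itself):
#
#   class EchoHandler(Handler):
#       def data_came_in(self, s, data):
#           s.write(data)  # echo the payload straight back to the peer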
class SingleSocket(object):
def __init__(self, rawserver, sock, handler, context, addr=None):
self.rawserver = rawserver
self.socket = sock
self.handler = handler
self.buffer = []
self.last_hit = bttime()
self.fileno = sock.fileno()
self.connected = False
self.context = context
self.ip = None
self.port = None
if isinstance(addr, basestring):
# UNIX socket, not really ip
self.ip = addr
else:
peername = (None, None)
try:
peername = self.socket.getpeername()
except socket.error, e:
# UDP raises (107, 'Transport endpoint is not connected')
# but so can a TCP socket we just got from start_connection,
# in which case addr is set and we use it later.
                if (e[0] == 107) and (addr is None):
# lies.
# the peer endpoint should be gathered from the
# tuple passed to data_came_in
try:
peername = self.socket.getsockname()
except socket.error, e:
pass
# this is awesome!
# max prefers a value over None, so this is a common case:
# max(('ip', None), ('ip', 1234)) => ('ip', 1234)
# or the default case:
# max(('ip', None), None) => ('ip', None)
self.ip, self.port = max(peername, addr)
def close(self):
sock = self.socket
self.socket = None
self.buffer = []
del self.rawserver.single_sockets[self.fileno]
self.rawserver.poll.unregister(sock)
self.handler = None
if self.rawserver.config['close_with_rst']:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
sock.close()
def shutdown(self, val):
self.socket.shutdown(val)
def is_flushed(self):
return len(self.buffer) == 0
def write(self, s):
assert self.socket is not None
self.buffer.append(s)
if len(self.buffer) == 1:
self.try_write()
def try_write(self):
if self.connected:
try:
while self.buffer != []:
amount = self.socket.send(self.buffer[0])
if amount != len(self.buffer[0]):
if amount != 0:
self.buffer[0] = self.buffer[0][amount:]
break
del self.buffer[0]
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self.rawserver.dead_from_write.append(self)
return
if self.buffer == []:
self.rawserver.poll.register(self.socket, POLLIN)
else:
self.rawserver.poll.register(self.socket, POLLIN | POLLOUT)
def default_error_handler(level, message):
print message
class RawServer(object):
def __init__(self, doneflag, config, noisy=True,
errorfunc=default_error_handler, tos=0):
self.config = config
self.tos = tos
self.poll = poll()
# {socket: SingleSocket}
self.single_sockets = {}
self.udp_sockets = {}
self.dead_from_write = []
self.doneflag = doneflag
self.noisy = noisy
self.errorfunc = errorfunc
self.funcs = []
self.externally_added_tasks = []
self.listening_handlers = {}
self.serversockets = {}
self.live_contexts = {None : True}
self.ident = thread.get_ident()
self.to_start = []
self.add_task(self.scan_for_timeouts, config['timeout_check_interval'])
if sys.platform.startswith('win'):
# Windows doesn't support pipes with select(). Just prevent sleeps
# longer than a second instead of proper wakeup for now.
self.wakeupfds = (None, None)
self._wakeup()
else:
self.wakeupfds = os.pipe()
self.poll.register(self.wakeupfds[0], POLLIN)
def _wakeup(self):
self.add_task(self._wakeup, 1)
def add_context(self, context):
self.live_contexts[context] = True
def remove_context(self, context):
del self.live_contexts[context]
self.funcs = [x for x in self.funcs if x[3] != context]
def add_task(self, func, delay, args=(), context=None):
assert thread.get_ident() == self.ident
assert type(args) == list or type(args) == tuple
if context in self.live_contexts:
insort(self.funcs, (bttime() + delay, func, args, context))
def external_add_task(self, func, delay, args=(), context=None):
assert type(args) == list or type(args) == tuple
self.externally_added_tasks.append((func, delay, args, context))
# Wake up the RawServer thread in case it's sleeping in poll()
if self.wakeupfds[1] is not None:
os.write(self.wakeupfds[1], 'X')
def scan_for_timeouts(self):
self.add_task(self.scan_for_timeouts,
self.config['timeout_check_interval'])
t = bttime() - self.config['socket_timeout']
tokill = []
for s in [s for s in self.single_sockets.values() if s not in self.udp_sockets.keys()]:
if s.last_hit < t:
tokill.append(s)
for k in tokill:
if k.socket is not None:
self._close_socket(k)
def create_unixserversocket(filename):
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.setblocking(0)
server.bind(filename)
server.listen(5)
return server
create_unixserversocket = staticmethod(create_unixserversocket)
def create_serversocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
server.listen(5)
return server
create_serversocket = staticmethod(create_serversocket)
def create_udpsocket(port, bind='', reuse=False, tos=0):
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if reuse and os.name != 'nt':
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
if tos != 0:
try:
server.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, tos)
except:
pass
server.bind((bind, port))
return server
create_udpsocket = staticmethod(create_udpsocket)
def start_listening(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
self.serversockets[serversocket.fileno()] = serversocket
self.poll.register(serversocket, POLLIN)
def start_listening_udp(self, serversocket, handler, context=None):
self.listening_handlers[serversocket.fileno()] = (handler, context)
nss = SingleSocket(self, serversocket, handler, context)
self.single_sockets[serversocket.fileno()] = nss
self.udp_sockets[nss] = 1
self.poll.register(serversocket, POLLIN)
def stop_listening(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.serversockets[serversocket.fileno()]
self.poll.unregister(serversocket)
def stop_listening_udp(self, serversocket):
del self.listening_handlers[serversocket.fileno()]
del self.single_sockets[serversocket.fileno()]
l = [s for s in self.udp_sockets.keys() if s.socket == serversocket]
del self.udp_sockets[l[0]]
self.poll.unregister(serversocket)
def start_connection(self, dns, handler=None, context=None, do_bind=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
bindaddr = do_bind and self.config['bind']
if bindaddr:
sock.bind((bindaddr, 0))
if self.tos != 0:
try:
sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, self.tos)
except:
pass
try:
sock.connect_ex(dns)
except socket.error:
sock.close()
raise
except Exception, e:
sock.close()
raise socket.error(str(e))
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, dns)
self.single_sockets[sock.fileno()] = s
return s
def _add_pending_connection(self, addr):
pass
def _remove_pending_connection(self, addr):
pass
def async_start_connection(self, dns, handler=None, context=None, do_bind=True):
self.to_start.insert(0, (dns, handler, context, do_bind))
self._start_connection()
return True
def _start_connection(self):
dns, handler, context, do_bind = self.to_start.pop()
try:
s = self.start_connection(dns, handler, context, do_bind)
except Exception, e:
handler.connection_failed(dns, e)
else:
handler.connection_started(s)
def wrap_socket(self, sock, handler, context=None, ip=None, port=None):
sock.setblocking(0)
self.poll.register(sock, POLLIN)
s = SingleSocket(self, sock, handler, context, (ip, port))
self.single_sockets[sock.fileno()] = s
return s
# must be called from the main thread
def install_sigint_handler(self):
try:
signal.signal(signal.SIGINT, self._handler)
except ValueError: #if we weren't the main thread...
pass
def _handler(self, signum, frame):
self.external_add_task(self.doneflag.set, 0)
# Allow pressing ctrl-c multiple times to raise KeyboardInterrupt,
# in case the program is in an infinite loop
signal.signal(signal.SIGINT, signal.default_int_handler)
def _handle_events(self, events):
for sock, event in events:
if sock in self.serversockets:
s = self.serversockets[sock]
if event & (POLLHUP | POLLERR) != 0:
try:
self.poll.unregister(s)
s.close()
except socket.error, e:
self.errorfunc(WARNING, _("failed to unregister or close server socket: %s") % str(e))
self.errorfunc(CRITICAL, _("lost server socket"))
else:
handler, context = self.listening_handlers[sock]
try:
newsock, addr = s.accept()
except socket.error, e:
continue
try:
newsock.setblocking(0)
nss = SingleSocket(self, newsock, handler, context, addr)
self.single_sockets[newsock.fileno()] = nss
self.poll.register(newsock, POLLIN)
                    self._make_wrapped_call(handler.connection_made,
                                            (nss,), context=context)
except socket.error, e:
self.errorfunc(WARNING,
_("Error handling accepted connection: ") +
str(e))
else:
s = self.single_sockets.get(sock)
if s is None:
if sock == self.wakeupfds[0]:
# Another thread wrote this just to wake us up.
os.read(sock, 1)
continue
s.connected = True
if event & POLLERR:
self._close_socket(s)
continue
if event & (POLLIN | POLLHUP):
s.last_hit = bttime()
try:
data, addr = s.socket.recvfrom(100000)
except socket.error, e:
code, msg = e
if code != EWOULDBLOCK:
self._close_socket(s)
continue
if data == '' and not self.udp_sockets.has_key(s):
self._close_socket(s)
else:
if not self.udp_sockets.has_key(s):
self._make_wrapped_call(s.handler.data_came_in,
(s, data), s)
else:
self._make_wrapped_call(s.handler.data_came_in,
(addr, data), s)
# data_came_in could have closed the socket (s.socket = None)
if event & POLLOUT and s.socket is not None:
s.try_write()
if s.is_flushed():
self._make_wrapped_call(s.handler.connection_flushed,
(s,), s)
def _pop_externally_added(self):
while self.externally_added_tasks:
task = self.externally_added_tasks.pop(0)
self.add_task(*task)
"""Modified for Kamaelia compatiblity - basically allowing the wrapper component to get a 'time-slice' for signal processing"""
def listen_forever(self, wrappercomponent = None):
ret = 0
self.ident = thread.get_ident()
"""The main loop in all its glory"""
while not self.doneflag.isSet() and not ret:
ret = self.listen_once()
if wrappercomponent:
wrappercomponent.checkInboxes()
def listen_once(self, period=1e9):
try:
self._pop_externally_added()
if self.funcs:
period = self.funcs[0][0] - bttime()
if period < 0:
period = 0
events = self.poll.poll(period * timemult)
if self.doneflag.isSet():
return 0
while self.funcs and self.funcs[0][0] <= bttime():
garbage, func, args, context = self.funcs.pop(0)
self._make_wrapped_call(func, args, context=context)
self._close_dead()
self._handle_events(events)
if self.doneflag.isSet():
return 0
self._close_dead()
except error, e:
if self.doneflag.isSet():
return 0
# I can't find a coherent explanation for what the behavior
# should be here, and people report conflicting behavior,
# so I'll just try all the possibilities
code = None
if hasattr(e, '__getitem__'):
code = e[0]
else:
code = e
if code == ENOBUFS:
# log the traceback so we can see where the exception is coming from
print_exc(file = sys.stderr)
self.errorfunc(CRITICAL,
_("Have to exit due to the TCP stack flaking "
"out. Please see the FAQ at %s") % FAQ_URL)
return -1
elif code in (EINTR,):
# add other ignorable error codes here
pass
else:
self.errorfunc(CRITICAL, str(e))
return 0
except KeyboardInterrupt:
print_exc()
return -1
except:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
return 0
def _make_wrapped_call(self, function, args, socket=None, context=None):
try:
function(*args)
except KeyboardInterrupt:
raise
except Exception, e: # hopefully nothing raises strings
# Incoming sockets can be assigned to a particular torrent during
# a data_came_in call, and it's possible (though not likely) that
# there could be a torrent-specific exception during the same call.
# Therefore read the context after the call.
if socket is not None:
context = socket.context
if self.noisy and context is None:
data = StringIO()
print_exc(file=data)
self.errorfunc(CRITICAL, data.getvalue())
if context is not None:
context.got_exception(e)
def _close_dead(self):
while len(self.dead_from_write) > 0:
old = self.dead_from_write
self.dead_from_write = []
for s in old:
if s.socket is not None:
self._close_socket(s)
def _close_socket(self, s):
sock = s.socket.fileno()
if self.config['close_with_rst']:
s.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, NOLINGER)
s.socket.close()
self.poll.unregister(sock)
del self.single_sockets[sock]
s.socket = None
self._make_wrapped_call(s.handler.connection_lost, (s,), s)
s.handler = None
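# A minimal handler sketch (illustrative only). RawServer never spells out the
# handler interface, but the call sites above (_handle_events,
# _start_connection, _close_socket) imply the callbacks below; anything beyond
# the method names is an assumption.
class ExampleHandler(object):
    def connection_made(self, s):
        # an accepted incoming connection, wrapped in a SingleSocket
        pass
    def connection_started(self, s):
        # an asynchronous outgoing connection has been initiated
        pass
    def connection_failed(self, dns, e):
        # an asynchronous outgoing connection attempt failed
        pass
    def data_came_in(self, addr_or_sock, data):
        # payload received; UDP handlers get the peer address, TCP the socket
        pass
    def connection_flushed(self, s):
        # the write buffer has fully drained
        pass
    def connection_lost(self, s):
        # the socket was closed or hit an error
        pass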
| sparkslabs/kamaelia_ | Sketches/RJL/bittorrent/BitTorrent/BitTorrent/RawServer.py | Python | apache-2.0 | 19,367 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_source import CopySource
class SparkSource(CopySource):
"""A copy activity Spark Server source.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param query: A query to retrieve data from source. Type: string (or
Expression with resultType string).
:type query: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'query': {'key': 'query', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, query=None):
super(SparkSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.query = query
self.type = 'SparkSource'
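# Illustrative usage sketch: constructing a SparkSource with a pinned query.
# The parameter values below are made-up examples; any JSON-serializable
# expression objects accepted by Data Factory would do.
def _example_spark_source():
    return SparkSource(
        source_retry_count=3,
        source_retry_wait='00:00:30',
        query='SELECT * FROM hivesampletable',  # hypothetical query
    )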
| lmazuel/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/spark_source.py | Python | mit | 2,072 |
"""525. Contiguous Array
https://leetcode.com/problems/contiguous-array/
Given a binary array nums, return the maximum length of a contiguous subarray
with an equal number of 0 and 1.
Example 1:
Input: nums = [0,1]
Output: 2
Explanation: [0, 1] is the longest contiguous subarray with an equal number
of 0 and 1.
Example 2:
Input: nums = [0,1,0]
Output: 2
Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal
number of 0 and 1.
Constraints:
1 <= nums.length <= 10^5
nums[i] is either 0 or 1.
"""
from typing import List
class Solution:
    def find_max_length(self, nums: List[int]) -> int:
        # Rewrite nums in place as a running prefix sum, counting each 1 as +1
        # and each 0 as -1; equal prefix sums at i < j mean nums[i+1..j] holds
        # as many 0s as 1s.
        for i in range(len(nums)):
            nums[i] = (1 if nums[i] == 1 else -1) + (nums[i - 1] if i > 0 else 0)
        store = {0: -1}  # prefix sum -> earliest index; sum 0 occurs before index 0
ans = 0
for i in range(len(nums)):
if nums[i] in store:
ans = max(ans, i - store[nums[i]])
else:
store[nums[i]] = i
return ans
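# Quick self-check against the two examples from the problem statement above.
if __name__ == "__main__":
    solution = Solution()
    assert solution.find_max_length([0, 1]) == 2
    assert solution.find_max_length([0, 1, 0]) == 2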
| isudox/leetcode-solution | python-algorithm/leetcode/problem_525.py | Python | mit | 971 |
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for pywrap_fast_wordpiece_tokenizer_model_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow_text.core.pybinds import pywrap_fast_wordpiece_tokenizer_model_builder
EXPECTED_MODEL_BUFFER_PATH = "third_party/tensorflow_text/python/ops/test_data/fast_wordpiece_tokenizer_model.fb"
class PywrapFastWordpieceBuilderTest(test_util.TensorFlowTestCase):
def test_build(self):
vocab = [
"a", "abc", "abcdefghi", "##de", "##defgxy", "##deh", "##f", "##ghz",
"<unk>"
]
max_bytes_per_token = 100
suffix_indicator = "##"
unk_token = "<unk>"
expected_model_buffer = gfile.GFile(EXPECTED_MODEL_BUFFER_PATH, "rb").read()
self.assertEqual(
pywrap_fast_wordpiece_tokenizer_model_builder
.build_fast_wordpiece_model(
vocab, max_bytes_per_token, suffix_indicator, unk_token, True,
False),
expected_model_buffer)
def test_build_throw_exception_unk_token_not_in_vocab(self):
vocab = [
"a", "abc", "abcdefghi", "##de", "##defgxy", "##deh", "##f", "##ghz"
]
max_bytes_per_token = 100
suffix_indicator = "##"
unk_token = "<unk>"
with self.assertRaisesRegex(RuntimeError,
"Cannot find unk_token in the vocab!"):
(pywrap_fast_wordpiece_tokenizer_model_builder
.build_fast_wordpiece_model(
vocab, max_bytes_per_token, suffix_indicator, unk_token, True,
False))
if __name__ == "__main__":
test.main()
| tensorflow/text | tensorflow_text/core/pybinds/pywrap_fast_wordpiece_tokenizer_model_builder_test.py | Python | apache-2.0 | 2,309 |
# -*- coding: utf-8 -*-
from . import mrp_production
from . import procurement_order
from . import sale_order
| kittiu/sale-workflow | sale_mrp_link/models/__init__.py | Python | agpl-3.0 | 111 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras.utils.generic_utils import class_and_config_for_serialized_keras_object
from tensorflow.python.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.generic_utils import serialize_keras_class_and_config
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras.utils.layer_utils import get_source_inputs
from tensorflow.python.keras.utils.layer_utils import print_summary
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.keras.utils.np_utils import normalize
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.keras.utils.vis_utils import model_to_dot
from tensorflow.python.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
| ghchinoy/tensorflow | tensorflow/python/keras/utils/__init__.py | Python | apache-2.0 | 2,463 |
"""
A script to automate installation of tool repositories from a Galaxy Tool Shed
into an instance of Galaxy.
Galaxy instance details and the installed tools can be provided in one of three
ways:
1. In the YAML format via dedicated files (see ``tool_list.yaml.sample`` for a
sample of such a file)
2. On the command line as dedicated script options (see the usage help).
3. As a single composite parameter to the script. The parameter must be a
single, YAML-formatted string with the keys corresponding to the keys
available for use in the YAML formatted file (for example:
`--yaml_tool "{'owner': 'kellrott', 'tool_shed_url':
'https://testtoolshed.g2.bx.psu.edu', 'tool_panel_section_id':
'peak_calling', 'name': 'synapse_interface'}"`).
Only one of the methods can be used with each invocation of the script but if
more than one are provided, precedence will correspond to the order of the
items in the list above.
When installing tools, Galaxy expects any `tool_panel_section_id` provided when
installing a tool to already exist in the configuration. If the section
does not exist, the tool will be installed outside any section. See
`shed_tool_conf.xml.sample` in this directory for a sample of such file. Before
running this script to install the tools, make sure to place such file into
Galaxy's configuration directory and set Galaxy configuration option
`tool_config_file` to include it.
Usage:
python install_tool_shed_tools.py [-h]
Required libraries:
bioblend, pyyaml
"""
import datetime as dt
import logging
import time
import yaml
from argparse import ArgumentParser
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy.toolshed import ToolShedClient
from bioblend.toolshed import ToolShedInstance
from bioblend.galaxy.client import ConnectionError
# Omit (most of the) logging by external libraries
logging.getLogger('bioblend').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.ERROR)
try:
    logging.captureWarnings(True)  # Capture HTTPS warnings from urllib3
except AttributeError:
pass
MTS = 'https://toolshed.g2.bx.psu.edu/' # Main Tool Shed
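# Illustrative sketch of a minimal tools file (the keys mirror what
# load_input_file/install_tools below consume); all values are placeholders,
# not a real deployment:
#
# galaxy_instance: http://127.0.0.1:8080
# api_key: <admin-user-api-key>
# tools:
# - name: <repository-name>
#   owner: <repository-owner>
#   tool_panel_section_id: <existing-section-id>
#   tool_shed_url: https://toolshed.g2.bx.psu.edu
#   revisions:
#   - <changeset-revision>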
class ProgressConsoleHandler(logging.StreamHandler):
"""
A handler class which allows the cursor to stay on
one line for selected messages
"""
on_same_line = False
def emit(self, record):
try:
msg = self.format(record)
stream = self.stream
same_line = hasattr(record, 'same_line')
if self.on_same_line and not same_line:
stream.write('\r\n')
stream.write(msg)
if same_line:
stream.write('.')
self.on_same_line = True
else:
stream.write('\r\n')
self.on_same_line = False
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _setup_global_logger():
formatter = logging.Formatter('%(asctime)s %(levelname)-5s - %(message)s')
progress = ProgressConsoleHandler()
file_handler = logging.FileHandler('/tmp/galaxy_tool_install.log')
    file_handler.setFormatter(formatter)  # timestamped records in the log file
logger = logging.getLogger('test')
logger.setLevel(logging.DEBUG)
logger.addHandler(progress)
logger.addHandler(file_handler)
return logger
def log_tool_install_error(tool, start, end, e, errored_tools):
"""
Log failed tool installations
"""
log.error("\t* Error installing a tool (after %s)! Name: %s," "owner: %s, "
"revision: %s, error: %s" % (tool['name'], str(end - start),
tool['owner'], tool['revision'],
e.body))
errored_tools.append({'name': tool['name'], 'owner': tool['owner'],
'revision': tool['revision'], 'error': e.body})
def log_tool_install_success(tool, start, end, installed_tools):
"""
    Log successful tool installation.
    Tools that finish in error still count as successful installs currently.
"""
installed_tools.append({'name': tool['name'], 'owner': tool['owner'],
'revision': tool['revision']})
log.debug("\tTool %s installed successfully (in %s) at revision %s" %
(tool['name'], str(end - start), tool['revision']))
def load_input_file(tool_list_file='tool_list.yaml'):
"""
Load YAML from the `tool_list_file` and return a dict with the content.
"""
with open(tool_list_file, 'r') as f:
tl = yaml.load(f)
return tl
def dump_to_yaml_file(content, file_name):
"""
Dump YAML-compatible `content` to `file_name`.
"""
with open(file_name, 'w') as f:
yaml.dump(content, f, default_flow_style=False)
def galaxy_instance(url=None, api_key=None):
"""
Get an instance of the `GalaxyInstance` object. If the arguments are not
provided, load the default values using `load_input_file` method.
"""
if not (url and api_key):
tl = load_input_file()
url = tl['galaxy_instance']
api_key = tl['api_key']
return GalaxyInstance(url, api_key)
def tool_shed_client(gi=None):
"""
Get an instance of the `ToolShedClient` on a given Galaxy instance. If no
value is provided for the `galaxy_instance`, use the default provided via
`load_input_file`.
"""
if not gi:
gi = galaxy_instance()
return ToolShedClient(gi)
def the_same_tool(tool_1_info, tool_2_info):
"""
Given two dicts containing info about tools, determine if they are the same
tool.
Each of the dicts must have the following keys: `name`, `owner`, and
(either `tool_shed` or `tool_shed_url`).
"""
t1ts = tool_1_info.get('tool_shed', tool_1_info.get('tool_shed_url', None))
t2ts = tool_2_info.get('tool_shed', tool_2_info.get('tool_shed_url', None))
if tool_1_info.get('name') == tool_2_info.get('name') and \
tool_1_info.get('owner') == tool_2_info.get('owner') and \
(t1ts in t2ts or t2ts in t1ts):
return True
return False
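# Example (illustrative): the following two dicts describe the same tool even
# though one carries 'tool_shed' and the other a full 'tool_shed_url':
#   {'name': 'foo', 'owner': 'bar', 'tool_shed': 'toolshed.g2.bx.psu.edu'}
#   {'name': 'foo', 'owner': 'bar',
#    'tool_shed_url': 'https://toolshed.g2.bx.psu.edu'}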
def installed_tool_revisions(gi=None, omit=None):
"""
Get a list of tool revisions installed from a Tool Shed on a Galaxy instance.
Included are all the tool revisions that were installed from a Tool
Shed and are available from `/api/tool_shed_repositories` url on the
given instance of Galaxy.
:type gi: GalaxyInstance object
    :param gi: A GalaxyInstance object as returned by the `galaxy_instance` method.
:type omit: list of strings
:param omit: A list of strings that, if found in a tool name, will result
in the tool not being included in the returned list.
:rtype: list of dicts
:return: Each dict in the returned list will have the following keys:
`name`, `owner`, `tool_shed_url`, `revisions`.
.. seealso:: this method returns a subset of data returned by
        the `installed_tools` function
"""
if not omit:
omit = []
tsc = tool_shed_client(gi)
installed_revisions_list = []
itl = tsc.get_repositories()
for it in itl:
if it['status'] == 'Installed':
skip = False
# Check if we already processed this tool and, if so, add the new
# revision to the existing list entry
for ir in installed_revisions_list:
if the_same_tool(it, ir):
ir['revisions'].append(it.get('changeset_revision', None))
skip = True
# Check if the repo name is contained in the 'omit' list
for o in omit:
if o in it['name']:
skip = True
# We have not processed this tool so create a list entry
if not skip:
ti = {'name': it['name'],
'owner': it['owner'],
'revisions': [it.get('changeset_revision', None)],
'tool_shed_url': 'https://' + it['tool_shed']}
installed_revisions_list.append(ti)
return installed_revisions_list
def installed_tools(gi, omit=None):
"""
Get a list of tools on a Galaxy instance.
:type gi: GalaxyInstance object
    :param gi: A GalaxyInstance object as returned by the `galaxy_instance` method.
:type omit: list of strings
:param omit: A list of strings that, if found in a tool name, will result
in the tool not being included in the returned list.
:rtype: dict
:return: The returned dictionary contains the following keys, each
containing a list of dictionaries:
- `tool_panel_shed_tools` with a list of tools available in the
tool panel that were installed on the target Galaxy instance
from the Tool Shed;
- `tool_panel_custom_tools` with a list of tools available in
the tool panel that were not installed via the Tool Shed;
- `shed_tools` with a list of tools returned from the
`installed_tool_revisions` function and complemented with a
`tool_panel_section_id` key as matched with the list of tools
from the first element of the returned triplet. Note that the
two lists (`shed_tools` and `tool_panel_shed_tools`) are likely
to be different and hence not every element in the `shed_tools`
will have the `tool_panel_section_id`!
.. seealso:: `installed_tool_revisions` (this function also returns the
output of the `installed_tool_revisions` function, as
`shed_tools` key).
"""
if not omit:
omit = []
    tp_tools = []  # Tools available in the tool panel and installed via a TS
custom_tools = [] # Tools available in the tool panel but custom-installed
tl = gi.tools.get_tool_panel() # In-panel tool list
for ts in tl: # ts -> tool section
# print "%s (%s): %s" % (ts['name'], ts['id'], len(ts.get('elems', [])))
        # Parse the tool panel to get the tool lists
for t in ts.get('elems', []):
# Tool ID is either a tool name (in case of custom-installed tools)
# or a URI (in case of Tool Shed-installed tools) so differentiate
# among those
tid = t['id'].split('/')
if len(tid) > 3:
skip = False
# Check if we already encountered this tool
for added_tool in tp_tools:
if tid[3] in added_tool['name']:
skip = True
# Check if the repo name is contained in the 'omit' list
for o in omit:
if o in tid[3]:
skip = True
if not skip:
tp_tools.append({'tool_shed_url': "https://{0}".format(tid[0]),
'owner': tid[2],
'name': tid[3],
'tool_panel_section_id': ts['id']})
# print "\t%s, %s, %s" % (tid[0], tid[2], tid[3])
else:
# print "\t%s" % t['id']
custom_tools.append(t['id'])
# Match tp_tools with the tool list available from the Tool Shed Clients on
    # the given Galaxy instance and add tool section IDs to it
    ts_tools = installed_tool_revisions(gi, omit)  # Tool revisions installed via a TS
for it in ts_tools:
for t in tp_tools:
if the_same_tool(it, t):
it['tool_panel_section_id'] = t['tool_panel_section_id']
return {'tool_panel_shed_tools': tp_tools,
'tool_panel_custom_tools': custom_tools,
'shed_tools': ts_tools}
def _list_tool_categories(tl):
"""
Given a list of dicts `tl` as returned by the `installed_tools` method and
where each list element holds a key `tool_panel_section_id`, return a list
of unique section IDs.
"""
category_list = []
for t in tl:
        category_list.append(t.get('tool_panel_section_id'))
return set(category_list)
def _parse_cli_options():
"""
Parse command line options, returning `parse_args` from `ArgumentParser`.
"""
parser = ArgumentParser(usage="usage: python %(prog)s <options>")
parser.add_argument("-d", "--dbkeysfile",
dest="dbkeys_list_file",
help="Reference genome dbkeys to install (see "
"dbkeys_list.yaml.sample)",)
parser.add_argument("-g", "--galaxy",
dest="galaxy_url",
help="Target Galaxy instance URL/IP address (required "
"if not defined in the tools list file)",)
parser.add_argument("-a", "--apikey",
dest="api_key",
help="Galaxy admin user API key (required if not "
"defined in the tools list file)",)
parser.add_argument("-t", "--toolsfile",
dest="tool_list_file",
help="Tools file to use (see tool_list.yaml.sample)",)
parser.add_argument("-y", "--yaml_tool",
dest="tool_yaml",
help="Install tool represented by yaml string",)
parser.add_argument("--name",
help="The name of the tool to install (only applicable "
"if the tools file is not provided).")
parser.add_argument("--owner",
help="The owner of the tool to install (only applicable "
"if the tools file is not provided).")
parser.add_argument("--section",
dest="tool_panel_section_id",
help="Galaxy tool panel section ID where the tool will "
"be installed (the section must exist in Galaxy; "
"only applicable if the tools file is not provided).")
parser.add_argument("--toolshed",
dest="tool_shed_url",
help="The Tool Shed URL where to install the tool from. "
"This is applicable only if the tool info is "
"provided as an option vs. in the tools file.")
return parser.parse_args()
def _flatten_tools_info(tools_info):
"""
Flatten the dict containing info about what tools to install.
The tool definition YAML file allows multiple revisions to be listed for
    the same tool. To enable simple, iterative processing of the info in this
script, flatten the `tools_info` list to include one entry per tool revision.
:type tools_info: list of dicts
:param tools_info: Each dict in this list should contain info about a tool.
:rtype: list of dicts
:return: Return a list of dicts that correspond to the input argument such
that if an input element contained `revisions` key with multiple
values, those will be returned as separate list items.
"""
def _copy_dict(d):
"""
        Iterate through the dictionary `d` and copy its keys and values
excluding the key `revisions`.
"""
new_d = {}
for k, v in d.iteritems():
if k != 'revisions':
new_d[k] = v
return new_d
flattened_list = []
for tool_info in tools_info:
revisions = tool_info.get('revisions', [])
if len(revisions) > 1:
for revision in revisions:
ti = _copy_dict(tool_info)
ti['revision'] = revision
flattened_list.append(ti)
        elif revisions:  # A single revision was defined, so keep it
ti = _copy_dict(tool_info)
ti['revision'] = revisions[0]
flattened_list.append(ti)
else: # Revision was not defined at all
flattened_list.append(tool_info)
return flattened_list
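# Example of the flattening (illustrative): an input entry such as
#   {'name': 'foo', 'owner': 'bar', 'revisions': ['r1', 'r2']}
# is returned as two entries
#   {'name': 'foo', 'owner': 'bar', 'revision': 'r1'}
#   {'name': 'foo', 'owner': 'bar', 'revision': 'r2'}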
def run_data_managers(options):
"""
Run Galaxy Data Manager to download, index, and install reference genome
data into Galaxy.
:type options: OptionParser object
:param options: command line arguments parsed by OptionParser
"""
dbkeys_list_file = options.dbkeys_list_file
kl = load_input_file(dbkeys_list_file) # Input file contents
dbkeys = kl['dbkeys'] # The list of dbkeys to install
dms = kl['data_managers'] # The list of data managers to run
galaxy_url = options.galaxy_url or kl['galaxy_instance']
api_key = options.api_key or kl['api_key']
gi = galaxy_instance(galaxy_url, api_key)
istart = dt.datetime.now()
errored_dms = []
dbkey_counter = 0
for dbkey in dbkeys:
dbkey_counter += 1
dbkey_name = dbkey.get('dbkey')
dm_counter = 0
for dm in dms:
dm_counter += 1
dm_tool = dm.get('id')
            # Initiate tool installation
start = dt.datetime.now()
log.debug('[dbkey {0}/{1}; DM: {2}/{3}] Installing dbkey {4} with '
'DM {5}'.format(dbkey_counter, len(dbkeys), dm_counter,
len(dms), dbkey_name, dm_tool))
tool_input = dbkey
try:
response = gi.tools.run_tool('', dm_tool, tool_input)
jobs = response.get('jobs', [])
# Check if a job is actually running
if len(jobs) == 0:
log.warning("\t(!) No '{0}' job found for '{1}'".format(dm_tool,
dbkey_name))
errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
else:
# Monitor the job(s)
log.debug("\tJob running", extra={'same_line': True})
done_count = 0
while done_count < len(jobs):
done_count = 0
for job in jobs:
job_id = job.get('id')
job_state = gi.jobs.show_job(job_id).get('state', '')
if job_state == 'ok':
done_count += 1
elif job_state == 'error':
done_count += 1
errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
log.debug("", extra={'same_line': True})
time.sleep(10)
log.debug("\tDbkey '{0}' installed successfully in '{1}'".format(
dbkey.get('dbkey'), dt.datetime.now() - start))
except ConnectionError, e:
response = None
end = dt.datetime.now()
log.error("\t* Error installing dbkey {0} for DM {1} (after {2}): {3}"
.format(dbkey_name, dm_tool, end - start, e.body))
errored_dms.append({'dbkey': dbkey_name, 'DM': dm_tool})
log.info("All dbkeys & DMs listed in '{0}' have been processed.".format(dbkeys_list_file))
log.info("Errored DMs: {0}".format(errored_dms))
log.info("Total run time: {0}".format(dt.datetime.now() - istart))
def install_repository_revision(tool, tsc):
"""
Installs single tool
"""
response = tsc.install_repository_revision(
tool['tool_shed_url'], tool['name'], tool['owner'],
tool['revision'], tool['install_tool_dependencies'],
tool['install_repository_dependencies'],
tool['tool_panel_section_id'],
tool['tool_panel_section_label'])
if isinstance(response, dict) and response.get('status', None) == 'ok':
# This rare case happens if a tool is already installed but
# was not recognised as such in the above check. In such a
# case the return value looks like this:
# {u'status': u'ok', u'message': u'No repositories were
# installed, possibly because the selected repository has
# already been installed.'}
log.debug("\tTool {0} is already installed.".format(tool['name']))
return response
def wait_for_install(tool, tsc, timeout=3600):
"""
If nginx times out, we look into the list of installed repositories
    and try to determine if a tool of the same name/owner is still installing.
Returns True if install finished, returns False when timeout is exceeded.
"""
def install_done(tool, tsc):
itl = tsc.get_repositories()
for it in itl:
if (tool['name'] == it['name']) and (it['owner'] == tool['owner']):
if it['status'] not in ['Installed', 'Error']:
return False
return True
finished = install_done(tool, tsc)
while (not finished) and (timeout > 0):
timeout = timeout - 10
time.sleep(10)
finished = install_done(tool, tsc)
if timeout > 0:
return True
else:
return False
def install_tools(options):
"""
Parse the default input file and proceed to install listed tools.
:type options: OptionParser object
:param options: command line arguments parsed by OptionParser
"""
istart = dt.datetime.now()
tool_list_file = options.tool_list_file
    tl = {}  # Defaults; stays empty when no tools file is given
    if tool_list_file:
        tl = load_input_file(tool_list_file)  # Input file contents
tools_info = tl['tools'] # The list of tools to install
elif options.tool_yaml:
tools_info = [yaml.load(options.tool_yaml)]
else:
# An individual tool was specified on the command line
tools_info = [{"owner": options.owner,
"name": options.name,
"tool_panel_section_id": options.tool_panel_section_id,
"tool_shed_url": options.tool_shed_url or MTS}]
galaxy_url = options.galaxy_url or tl.get('galaxy_instance')
api_key = options.api_key or tl.get('api_key')
gi = galaxy_instance(galaxy_url, api_key)
tsc = tool_shed_client(gi)
itl = installed_tool_revisions(gi) # installed tools list
responses = []
errored_tools = []
skipped_tools = []
installed_tools = []
counter = 0
tools_info = _flatten_tools_info(tools_info)
total_num_tools = len(tools_info)
default_err_msg = ('All repositories that you are attempting to install '
'have been previously installed.')
# Process each tool/revision: check if it's already installed or install it
for tool_info in tools_info:
counter += 1
already_installed = False # Reset the flag
tool = {} # Payload for the tool we are installing
# Copy required `tool_info` keys into the `tool` dict
tool['name'] = tool_info.get('name', None)
tool['owner'] = tool_info.get('owner', None)
tool['tool_panel_section_id'] = tool_info.get('tool_panel_section_id', None)
tool['tool_panel_section_label'] = tool_info.get('tool_panel_section_label', None)
# Check if all required tool sections have been provided; if not, skip
# the installation of this tool. Note that data managers are an exception
# but they must contain string `data_manager` within the tool name.
if not tool['name'] or not tool['owner'] or (not (tool['tool_panel_section_id']
or tool['tool_panel_section_label'])
and 'data_manager' not in tool.get('name', '')):
log.error("Missing required tool info field; skipping [name: '{0}'; "
"owner: '{1}'; tool_panel_section_id: '{2}']; tool_panel_section_label: '{3}'"
.format(tool['name'], tool['owner'], tool['tool_panel_section_id'],
tool['tool_panel_section_label']))
continue
# Populate fields that can optionally be provided (if not provided, set
# defaults).
tool['install_tool_dependencies'] = \
tool_info.get('install_tool_dependencies', True)
tool['install_repository_dependencies'] = \
tool_info.get('install_repository_dependencies', True)
tool['tool_shed_url'] = \
tool_info.get('tool_shed_url', MTS)
ts = ToolShedInstance(url=tool['tool_shed_url'])
        ts.verify = tool_info.get('verify_ssl', True)  # verify SSL certs by default
        # Get the set revision or set it to the latest installable revision
        # (only query the Tool Shed when no revision was pinned)
        tool['revision'] = tool_info.get('revision')
        if not tool['revision']:
            tool['revision'] = ts.repositories.get_ordered_installable_revisions(
                tool['name'], tool['owner'])[-1]
# Check if the tool@revision is already installed
for installed in itl:
if the_same_tool(installed, tool) and tool['revision'] in installed['revisions']:
log.debug("({0}/{1}) Tool {2} already installed at revision {3}. Skipping."
.format(counter, total_num_tools, tool['name'], tool['revision']))
skipped_tools.append({'name': tool['name'], 'owner': tool['owner'],
'revision': tool['revision']})
already_installed = True
break
if not already_installed:
            # Initiate tool installation
start = dt.datetime.now()
log.debug('(%s/%s) Installing tool %s from %s to section "%s" at '
'revision %s (TRT: %s)' %
(counter, total_num_tools, tool['name'], tool['owner'],
tool['tool_panel_section_id'] or tool['tool_panel_section_label'],
tool['revision'], dt.datetime.now() - istart))
try:
response = install_repository_revision(tool, tsc)
end = dt.datetime.now()
log_tool_install_success(tool=tool, start=start, end=end,
installed_tools=installed_tools)
except ConnectionError, e:
response = None
end = dt.datetime.now()
if default_err_msg in e.body:
log.debug("\tTool %s already installed (at revision %s)" %
(tool['name'], tool['revision']))
else:
if e.message == "Unexpected response from galaxy: 504":
log.debug("Timeout during install of %s, extending wait to 1h"
% ((tool['name'])))
success = wait_for_install(tool=tool, tsc=tsc, timeout=3600)
if success:
log_tool_install_success(tool=tool, start=start, end=end,
installed_tools=installed_tools)
response = e.body # TODO: find a better response message
else:
log_tool_install_error(tool=tool, start=start, end=end,
e=e, errored_tools=errored_tools)
else:
log_tool_install_error(tool=tool, start=start, end=end,
e=e, errored_tools=errored_tools)
            outcome = {'tool': tool, 'response': response,
                       'duration': str(end - start)}
            responses.append(outcome)
log.info("Installed tools ({0}): {1}".format(
len(installed_tools), [(t['name'], t['revision']) for t in installed_tools]))
log.info("Skipped tools ({0}): {1}".format(
len(skipped_tools), [(t['name'], t['revision']) for t in skipped_tools]))
log.info("Errored tools ({0}): {1}".format(
len(errored_tools), [(t['name'], t['revision']) for t in errored_tools]))
log.info("All tools listed in '{0}' have been processed.".format(tool_list_file))
log.info("Total run time: {0}".format(dt.datetime.now() - istart))
if __name__ == "__main__":
global log
log = _setup_global_logger()
options = _parse_cli_options()
if options.tool_list_file or (options.name and options.owner and
options.tool_panel_section_id) or (options.tool_yaml):
install_tools(options)
elif options.dbkeys_list_file:
run_data_managers(options)
else:
log.error("Must provide the tool list file or individual tools info; "
"look at usage.")
| C3BI-pasteur-fr/Galaxy-playbook | galaxy-pasteur/roles/galaxy_tools/files/install_tool_shed_tools.py | Python | gpl-2.0 | 28,649 |
"""Tests for distutils.command.bdist_rpm."""
import unittest
import sys
import os
import tempfile
import shutil
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_rpm import bdist_rpm
from distutils.tests import support
from distutils.spawn import find_executable
from distutils import spawn
from distutils.errors import DistutilsExecError
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
class BuildRpmTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def setUp(self):
try:
sys.executable.encode("UTF-8")
except UnicodeEncodeError:
raise unittest.SkipTest("sys.executable is not encodable to UTF-8")
super(BuildRpmTestCase, self).setUp()
self.old_location = os.getcwd()
self.old_sys_argv = sys.argv, sys.argv[:]
def tearDown(self):
os.chdir(self.old_location)
sys.argv = self.old_sys_argv[0]
sys.argv[:] = self.old_sys_argv[1]
super(BuildRpmTestCase, self).tearDown()
    # XXX I am not yet able to make this test work without
    # spurious stdout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious stdout/stderr output under Mac OS X')
@unittest.skipIf(find_executable('rpm') is None,
'the rpm command is not found')
@unittest.skipIf(find_executable('rpmbuild') is None,
'the rpmbuild command is not found')
def test_quiet(self):
# let's create a package
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({'name': 'foo', 'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
# running in quiet mode
cmd.quiet = 1
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
# bug #2945: upload ignores bdist_rpm files
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
    # XXX I am not yet able to make this test work without
    # spurious stdout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious stdout/stderr output under Mac OS X')
# http://bugs.python.org/issue1533164
@unittest.skipIf(find_executable('rpm') is None,
'the rpm command is not found')
@unittest.skipIf(find_executable('rpmbuild') is None,
'the rpmbuild command is not found')
def test_no_optimize_flag(self):
        # let's create a package that breaks bdist_rpm
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({'name': 'foo', 'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
cmd.quiet = 1
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
# bug #2945: upload ignores bdist_rpm files
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
def test_suite():
return unittest.makeSuite(BuildRpmTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| Orav/kbengine | kbe/src/lib/python/Lib/distutils/tests/test_bdist_rpm.py | Python | lgpl-3.0 | 5,002 |
"""
>>> basketball_team = ['Sue', 'Tina', 'Maya', 'Diana', 'Pat']
>>> bus = TwilightBus(basketball_team)
>>> bus.drop('Tina')
>>> bus.drop('Pat')
>>> basketball_team
['Sue', 'Maya', 'Diana']
"""
# BEGIN TWILIGHT_BUS_CLASS
class TwilightBus:
"""A bus model that makes passengers vanish"""
def __init__(self, passengers=None):
if passengers is None:
self.passengers = [] # <1>
else:
            self.passengers = passengers  # <2>
def pick(self, name):
self.passengers.append(name)
def drop(self, name):
self.passengers.remove(name) # <3>
# END TWILIGHT_BUS_CLASS
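# Note: assignment <2> merely aliases the caller's list, which is why the
# doctest above ends up mutating basketball_team. A defensive copy would
# decouple the bus from its argument, e.g.:
#
#     self.passengers = list(passengers)
#
# The class keeps the alias on purpose, to demonstrate the pitfall.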
| Wallace-dyfq/example-code | 08-obj-ref/twilight_bus.py | Python | mit | 628 |